/* cnic.c: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
* Modified and maintained by: Michael Chan <mchan@broadcom.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>
#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"
#define DRV_MODULE_NAME "cnic"
static char version[] __devinitdata =
"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
"Chen (zongxi@broadcom.com");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
return rcu_dereference_protected(cnic_ulp_tbl[type],
lockdep_is_held(&cnic_lock));
}
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);
static struct cnic_ops cnic_bnx2_ops = {
.cnic_owner = THIS_MODULE,
.cnic_handler = cnic_service_bnx2,
.cnic_ctl = cnic_ctl,
};
static struct cnic_ops cnic_bnx2x_ops = {
.cnic_owner = THIS_MODULE,
.cnic_handler = cnic_service_bnx2x,
.cnic_ctl = cnic_ctl,
};
static struct workqueue_struct *cnic_wq;
static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
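/* UIO "open" handler.  Only one userspace opener is allowed at a time;
 * the L2 rings are quiesced and re-initialized so the new opener starts
 * from a clean state.  Everything runs under rtnl_lock to serialize
 * against netdev events.
 */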
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
struct cnic_uio_dev *udev = uinfo->priv;
struct cnic_dev *dev;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (udev->uio_dev != -1)
return -EBUSY;
rtnl_lock();
dev = udev->dev;
if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
rtnl_unlock();
return -ENODEV;
}
udev->uio_dev = iminor(inode);
cnic_shutdown_rings(dev);
cnic_init_rings(dev);
rtnl_unlock();
return 0;
}
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
struct cnic_uio_dev *udev = uinfo->priv;
udev->uio_dev = -1;
return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
atomic_inc(&dev->ref_count);
}
static inline void cnic_put(struct cnic_dev *dev)
{
atomic_dec(&dev->ref_count);
}
static inline void csk_hold(struct cnic_sock *csk)
{
atomic_inc(&csk->ref_count);
}
static inline void csk_put(struct cnic_sock *csk)
{
atomic_dec(&csk->ref_count);
}
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
struct cnic_dev *cdev;
read_lock(&cnic_dev_lock);
list_for_each_entry(cdev, &cnic_dev_list, list) {
if (netdev == cdev->netdev) {
cnic_hold(cdev);
read_unlock(&cnic_dev_lock);
return cdev;
}
}
read_unlock(&cnic_dev_lock);
return NULL;
}
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
atomic_inc(&ulp_ops->ref_count);
}
static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
atomic_dec(&ulp_ops->ref_count);
}
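/* The helpers below tunnel register, context and ring operations
 * through the ethdev->drv_ctl() callback of the bnx2/bnx2x driver,
 * which owns the underlying PCI device.
 */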
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
info.cmd = DRV_CTL_CTX_WR_CMD;
io->cid_addr = cid_addr;
io->offset = off;
io->data = val;
ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
info.cmd = DRV_CTL_CTXTBL_WR_CMD;
io->offset = off;
io->dma_addr = addr;
ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_l2_ring *ring = &info.data.ring;
if (start)
info.cmd = DRV_CTL_START_L2_CMD;
else
info.cmd = DRV_CTL_STOP_L2_CMD;
ring->cid = cid;
ring->client_id = cl_id;
ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
info.cmd = DRV_CTL_IO_WR_CMD;
io->offset = off;
io->data = val;
ethdev->drv_ctl(dev->netdev, &info);
}
static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
struct drv_ctl_io *io = &info.data.io;
info.cmd = DRV_CTL_IO_RD_CMD;
io->offset = off;
ethdev->drv_ctl(dev->netdev, &info);
return io->data;
}
static int cnic_in_use(struct cnic_sock *csk)
{
return test_bit(SK_F_INUSE, &csk->flags);
}
static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
info.cmd = cmd;
info.data.credit.credit_count = count;
ethdev->drv_ctl(dev->netdev, &info);
}
static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
u32 i;
for (i = 0; i < cp->max_cid_space; i++) {
if (cp->ctx_tbl[i].cid == cid) {
*l5_cid = i;
return 0;
}
}
return -EINVAL;
}
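/* Send an iSCSI netlink event to userspace through the registered
 * CNIC_ULP_ISCSI handler.  With a valid csk, a PATH_REQ carrying the
 * destination IP, VLAN and MTU is sent and retried up to 3 times at
 * 100ms intervals; with a NULL csk, an IF_DOWN event is sent instead.
 */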
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
struct cnic_sock *csk)
{
struct iscsi_path path_req;
char *buf = NULL;
u16 len = 0;
u32 msg_type = ISCSI_KEVENT_IF_DOWN;
struct cnic_ulp_ops *ulp_ops;
struct cnic_uio_dev *udev = cp->udev;
int rc = 0, retry = 0;
if (!udev || udev->uio_dev == -1)
return -ENODEV;
if (csk) {
len = sizeof(path_req);
buf = (char *) &path_req;
memset(&path_req, 0, len);
msg_type = ISCSI_KEVENT_PATH_REQ;
path_req.handle = (u64) csk->l5_cid;
if (test_bit(SK_F_IPV6, &csk->flags)) {
memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
sizeof(struct in6_addr));
path_req.ip_addr_len = 16;
} else {
memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
sizeof(struct in_addr));
path_req.ip_addr_len = 4;
}
path_req.vlan_id = csk->vlan_id;
path_req.pmtu = csk->mtu;
}
while (retry < 3) {
rc = 0;
rcu_read_lock();
ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
if (ulp_ops)
rc = ulp_ops->iscsi_nl_send_msg(
cp->ulp_handle[CNIC_ULP_ISCSI],
msg_type, buf, len);
rcu_read_unlock();
if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
break;
msleep(100);
retry++;
}
return rc;
}
static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
char *buf, u16 len)
{
int rc = -EINVAL;
switch (msg_type) {
case ISCSI_UEVENT_PATH_UPDATE: {
struct cnic_local *cp;
u32 l5_cid;
struct cnic_sock *csk;
struct iscsi_path *path_resp;
if (len < sizeof(*path_resp))
break;
path_resp = (struct iscsi_path *) buf;
cp = dev->cnic_priv;
l5_cid = (u32) path_resp->handle;
if (l5_cid >= MAX_CM_SK_TBL_SZ)
break;
rcu_read_lock();
if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
rc = -ENODEV;
rcu_read_unlock();
break;
}
csk = &cp->csk_tbl[l5_cid];
csk_hold(csk);
if (cnic_in_use(csk) &&
test_bit(SK_F_CONNECT_START, &csk->flags)) {
memcpy(csk->ha, path_resp->mac_addr, 6);
if (test_bit(SK_F_IPV6, &csk->flags))
memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
sizeof(struct in6_addr));
else
memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
sizeof(struct in_addr));
if (is_valid_ether_addr(csk->ha)) {
cnic_cm_set_pg(csk);
} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
cnic_cm_upcall(cp, csk,
L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
clear_bit(SK_F_CONNECT_START, &csk->flags);
}
}
csk_put(csk);
rcu_read_unlock();
rc = 0;
}
}
return rc;
}
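/* SK_F_OFFLD_SCHED acts as a per-connection busy bit: the offload,
 * close and abort paths all claim it with test_and_set_bit() so that
 * only one of them operates on the connection at a time.
 */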
static int cnic_offld_prep(struct cnic_sock *csk)
{
if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
return 0;
if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
return 0;
}
return 1;
}
static int cnic_close_prep(struct cnic_sock *csk)
{
clear_bit(SK_F_CONNECT_START, &csk->flags);
smp_mb__after_clear_bit();
if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
msleep(1);
return 1;
}
return 0;
}
static int cnic_abort_prep(struct cnic_sock *csk)
{
clear_bit(SK_F_CONNECT_START, &csk->flags);
smp_mb__after_clear_bit();
while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
msleep(1);
if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
return 1;
}
return 0;
}
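/* Register a ULP driver (iSCSI, FCoE or L4) in the global ULP table
 * and call its cnic_init() handler on every existing cnic device.
 */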
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
struct cnic_dev *dev;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (cnic_ulp_tbl_prot(ulp_type)) {
pr_err("%s: Type %d has already been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EBUSY;
}
read_lock(&cnic_dev_lock);
list_for_each_entry(dev, &cnic_dev_list, list) {
struct cnic_local *cp = dev->cnic_priv;
clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
}
read_unlock(&cnic_dev_lock);
atomic_set(&ulp_ops->ref_count, 0);
rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
mutex_unlock(&cnic_lock);
/* Prevent race conditions with netdev_event */
rtnl_lock();
list_for_each_entry(dev, &cnic_dev_list, list) {
struct cnic_local *cp = dev->cnic_priv;
if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
ulp_ops->cnic_init(dev);
}
rtnl_unlock();
return 0;
}
int cnic_unregister_driver(int ulp_type)
{
struct cnic_dev *dev;
struct cnic_ulp_ops *ulp_ops;
int i = 0;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl_prot(ulp_type);
if (!ulp_ops) {
pr_err("%s: Type %d has not been registered\n",
__func__, ulp_type);
goto out_unlock;
}
read_lock(&cnic_dev_lock);
list_for_each_entry(dev, &cnic_dev_list, list) {
struct cnic_local *cp = dev->cnic_priv;
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
pr_err("%s: Type %d still has devices registered\n",
__func__, ulp_type);
read_unlock(&cnic_dev_lock);
goto out_unlock;
}
}
read_unlock(&cnic_dev_lock);
rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
mutex_unlock(&cnic_lock);
synchronize_rcu();
while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
msleep(100);
i++;
}
	/* "dev" is stale after the list walk above, so don't dereference it */
	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
return 0;
out_unlock:
mutex_unlock(&cnic_lock);
return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
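/* Bind a ULP context to this cnic device and, if the hardware is
 * already up, start the ULP right away.
 */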
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
void *ulp_ctx)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_ulp_ops *ulp_ops;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
pr_err("%s: Driver with type %d has not been registered\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EAGAIN;
}
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
pr_err("%s: Type %d has already been registered to this device\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EBUSY;
}
clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
cp->ulp_handle[ulp_type] = ulp_ctx;
ulp_ops = cnic_ulp_tbl_prot(ulp_type);
rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
cnic_hold(dev);
if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
mutex_unlock(&cnic_lock);
return 0;
}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
struct cnic_local *cp = dev->cnic_priv;
int i = 0;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
cnic_put(dev);
} else {
pr_err("%s: device not registered to this ulp type %d\n",
__func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EINVAL;
}
mutex_unlock(&cnic_lock);
if (ulp_type == CNIC_ULP_ISCSI)
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
synchronize_rcu();
while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
i < 20) {
msleep(100);
i++;
}
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
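/* Simple bitmap-based ID allocator.  IDs are handed out round-robin
 * starting at id_tbl->next; the wrap mask in cnic_alloc_new_id()
 * assumes the table size is a power of 2.
 */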
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
u32 next)
{
id_tbl->start = start_id;
id_tbl->max = size;
id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
return 0;
}
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
kfree(id_tbl->table);
id_tbl->table = NULL;
}
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
int ret = -1;
id -= id_tbl->start;
if (id >= id_tbl->max)
return ret;
spin_lock(&id_tbl->lock);
if (!test_bit(id, id_tbl->table)) {
set_bit(id, id_tbl->table);
ret = 0;
}
spin_unlock(&id_tbl->lock);
return ret;
}
/* Returns -1 (0xffffffff, since the return type is u32) if no free ID
 * is available.
 */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
u32 id;
spin_lock(&id_tbl->lock);
id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
if (id >= id_tbl->max) {
id = -1;
if (id_tbl->next != 0) {
id = find_first_zero_bit(id_tbl->table, id_tbl->next);
if (id >= id_tbl->next)
id = -1;
}
}
if (id < id_tbl->max) {
set_bit(id, id_tbl->table);
id_tbl->next = (id + 1) & (id_tbl->max - 1);
id += id_tbl->start;
}
spin_unlock(&id_tbl->lock);
return id;
}
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
if (id == -1)
return;
id -= id_tbl->start;
if (id >= id_tbl->max)
return;
clear_bit(id, id_tbl->table);
}
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
if (!dma->pg_arr)
return;
for (i = 0; i < dma->num_pages; i++) {
if (dma->pg_arr[i]) {
dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
dma->pg_arr[i], dma->pg_map_arr[i]);
dma->pg_arr[i] = NULL;
}
}
if (dma->pgtbl) {
dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
dma->pgtbl, dma->pgtbl_map);
dma->pgtbl = NULL;
}
kfree(dma->pg_arr);
dma->pg_arr = NULL;
dma->num_pages = 0;
}
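/* Fill in the page table that describes a multi-page DMA area to the
 * chip.  Each 64-bit entry holds the bus address of one page; the two
 * variants below differ only in the dword order the hardware expects.
 */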
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
__le32 *page_table = (__le32 *) dma->pgtbl;
for (i = 0; i < dma->num_pages; i++) {
		/* Each 64-bit entry is written high dword first (big-endian
		 * word order); the dwords themselves are little-endian.
		 */
*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
page_table++;
*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
page_table++;
}
}
static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
int i;
__le32 *page_table = (__le32 *) dma->pgtbl;
for (i = 0; i < dma->num_pages; i++) {
		/* Each 64-bit entry is written low dword first (fully
		 * little-endian).
		 */
*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
page_table++;
*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
page_table++;
}
}
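/* Allocate @pages coherent DMA pages, tracked in dma->pg_arr and
 * dma->pg_map_arr, plus an optional hardware page table describing
 * them.  Any partial allocation is unwound through cnic_free_dma().
 */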
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
int pages, int use_pg_tbl)
{
int i, size;
struct cnic_local *cp = dev->cnic_priv;
size = pages * (sizeof(void *) + sizeof(dma_addr_t));
dma->pg_arr = kzalloc(size, GFP_ATOMIC);
if (dma->pg_arr == NULL)
return -ENOMEM;
dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
dma->num_pages = pages;
for (i = 0; i < pages; i++) {
dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
BCM_PAGE_SIZE,
&dma->pg_map_arr[i],
GFP_ATOMIC);
if (dma->pg_arr[i] == NULL)
goto error;
}
if (!use_pg_tbl)
return 0;
dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
~(BCM_PAGE_SIZE - 1);
dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
&dma->pgtbl_map, GFP_ATOMIC);
if (dma->pgtbl == NULL)
goto error;
cp->setup_pgtbl(dev, dma);
return 0;
error:
cnic_free_dma(dev, dma);
return -ENOMEM;
}
static void cnic_free_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int i;
for (i = 0; i < cp->ctx_blks; i++) {
if (cp->ctx_arr[i].ctx) {
dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
cp->ctx_arr[i].ctx,
cp->ctx_arr[i].mapping);
cp->ctx_arr[i].ctx = NULL;
}
}
}
static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
uio_unregister_device(&udev->cnic_uinfo);
if (udev->l2_buf) {
dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
udev->l2_buf, udev->l2_buf_map);
udev->l2_buf = NULL;
}
if (udev->l2_ring) {
dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
udev->l2_ring, udev->l2_ring_map);
udev->l2_ring = NULL;
}
pci_dev_put(udev->pdev);
kfree(udev);
}
static void cnic_free_uio(struct cnic_uio_dev *udev)
{
if (!udev)
return;
write_lock(&cnic_dev_lock);
list_del_init(&udev->list);
write_unlock(&cnic_dev_lock);
__cnic_free_uio(udev);
}
static void cnic_free_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
if (udev) {
udev->dev = NULL;
cp->udev = NULL;
}
cnic_free_context(dev);
kfree(cp->ctx_arr);
cp->ctx_arr = NULL;
cp->ctx_blks = 0;
cnic_free_dma(dev, &cp->gbl_buf_info);
cnic_free_dma(dev, &cp->kwq_info);
cnic_free_dma(dev, &cp->kwq_16_data_info);
cnic_free_dma(dev, &cp->kcq2.dma);
cnic_free_dma(dev, &cp->kcq1.dma);
kfree(cp->iscsi_tbl);
cp->iscsi_tbl = NULL;
kfree(cp->ctx_tbl);
cp->ctx_tbl = NULL;
cnic_free_id_tbl(&cp->fcoe_cid_tbl);
cnic_free_id_tbl(&cp->cid_tbl);
}
static int cnic_alloc_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
if (CHIP_NUM(cp) == CHIP_NUM_5709) {
int i, k, arr_size;
cp->ctx_blk_size = BCM_PAGE_SIZE;
cp->cids_per_blk = BCM_PAGE_SIZE / 128;
arr_size = BNX2_MAX_CID / cp->cids_per_blk *
sizeof(struct cnic_ctx);
cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
if (cp->ctx_arr == NULL)
return -ENOMEM;
k = 0;
for (i = 0; i < 2; i++) {
u32 j, reg, off, lo, hi;
if (i == 0)
off = BNX2_PG_CTX_MAP;
else
off = BNX2_ISCSI_CTX_MAP;
reg = cnic_reg_rd_ind(dev, off);
lo = reg >> 16;
hi = reg & 0xffff;
for (j = lo; j < hi; j += cp->cids_per_blk, k++)
cp->ctx_arr[k].cid = j;
}
cp->ctx_blks = k;
if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
cp->ctx_blks = 0;
return -ENOMEM;
}
for (i = 0; i < cp->ctx_blks; i++) {
cp->ctx_arr[i].ctx =
dma_alloc_coherent(&dev->pcidev->dev,
BCM_PAGE_SIZE,
&cp->ctx_arr[i].mapping,
GFP_KERNEL);
if (cp->ctx_arr[i].ctx == NULL)
return -ENOMEM;
}
}
return 0;
}
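/* Ring index helpers.  bnx2 uses a plain linear index.  On bnx2x the
 * last KCQE slot of every page holds the next-page pointer (see
 * cnic_alloc_kcq() below), so the index must skip that slot at each
 * page boundary.
 */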
static u16 cnic_bnx2_next_idx(u16 idx)
{
return idx + 1;
}
static u16 cnic_bnx2_hw_idx(u16 idx)
{
return idx;
}
static u16 cnic_bnx2x_next_idx(u16 idx)
{
idx++;
if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
idx++;
return idx;
}
static u16 cnic_bnx2x_hw_idx(u16 idx)
{
if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
idx++;
return idx;
}
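/* Allocate a kernel completion queue.  When the chip walks the pages
 * through a hardware page table, no software chaining is needed;
 * otherwise the pages are linked into a ring by writing each page's
 * successor address into its final KCQE slot.
 */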
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
bool use_pg_tbl)
{
int err, i, use_page_tbl = 0;
struct kcqe **kcq;
if (use_pg_tbl)
use_page_tbl = 1;
err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
if (err)
return err;
kcq = (struct kcqe **) info->dma.pg_arr;
info->kcq = kcq;
info->next_idx = cnic_bnx2_next_idx;
info->hw_idx = cnic_bnx2_hw_idx;
if (use_pg_tbl)
return 0;
info->next_idx = cnic_bnx2x_next_idx;
info->hw_idx = cnic_bnx2x_hw_idx;
for (i = 0; i < KCQ_PAGE_CNT; i++) {
struct bnx2x_bd_chain_next *next =
(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
int j = i + 1;
if (j >= KCQ_PAGE_CNT)
j = 0;
next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
}
return 0;
}
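/* Find or create the cnic_uio_dev for this PCI device.  The structure
 * is kept on cnic_udev_list so it can be reused when the same device
 * is torn down and brought back up while userspace still has it open.
 */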
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev;
read_lock(&cnic_dev_lock);
list_for_each_entry(udev, &cnic_udev_list, list) {
if (udev->pdev == dev->pcidev) {
udev->dev = dev;
cp->udev = udev;
read_unlock(&cnic_dev_lock);
return 0;
}
}
read_unlock(&cnic_dev_lock);
udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
if (!udev)
return -ENOMEM;
udev->uio_dev = -1;
udev->dev = dev;
udev->pdev = dev->pcidev;
udev->l2_ring_size = pages * BCM_PAGE_SIZE;
udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
&udev->l2_ring_map,
GFP_KERNEL | __GFP_COMP);
if (!udev->l2_ring)
goto err_udev;
udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
&udev->l2_buf_map,
GFP_KERNEL | __GFP_COMP);
if (!udev->l2_buf)
goto err_dma;
write_lock(&cnic_dev_lock);
list_add(&udev->list, &cnic_udev_list);
write_unlock(&cnic_dev_lock);
pci_dev_get(udev->pdev);
cp->udev = udev;
return 0;
err_dma:
dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
udev->l2_ring, udev->l2_ring_map);
err_udev:
kfree(udev);
return -ENOMEM;
}
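/* Export the device's register window, status block, L2 ring and L2
 * buffer to userspace as four UIO memory maps.
 */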
static int cnic_init_uio(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev = cp->udev;
struct uio_info *uinfo;
int ret = 0;
if (!udev)
return -ENOMEM;
uinfo = &udev->cnic_uinfo;
uinfo->mem[0].addr = dev->netdev->base_addr;
uinfo->mem[0].internal_addr = dev->regview;
uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
uinfo->mem[0].memtype = UIO_MEM_PHYS;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
else
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
uinfo->name = "bnx2x_cnic";
}
uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
uinfo->mem[2].size = udev->l2_ring_size;
uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
uinfo->mem[3].size = udev->l2_buf_size;
uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
uinfo->version = CNIC_MODULE_VERSION;
uinfo->irq = UIO_IRQ_CUSTOM;
uinfo->open = cnic_uio_open;
uinfo->release = cnic_uio_close;
if (udev->uio_dev == -1) {
if (!uinfo->priv) {
uinfo->priv = udev;
ret = uio_register_device(&udev->pdev->dev, uinfo);
}
} else {
cnic_init_rings(dev);
}
return ret;
}
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int ret;
ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
if (ret)
goto error;
cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
if (ret)
goto error;
ret = cnic_alloc_context(dev);
if (ret)
goto error;
ret = cnic_alloc_uio_rings(dev, 2);
if (ret)
goto error;
ret = cnic_init_uio(dev);
if (ret)
goto error;
return 0;
error:
cnic_free_resc(dev);
return ret;
}
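/* Allocate the bnx2x connection context blocks.  On 57710 each block
 * must be naturally aligned to the block size; a misaligned allocation
 * frees everything, grows the request by one alignment unit and
 * restarts the loop (i is reset to -1).
 */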
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
int ctx_blk_size = cp->ethdev->ctx_blk_size;
int total_mem, blks, i;
total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
blks = total_mem / ctx_blk_size;
if (total_mem % ctx_blk_size)
blks++;
if (blks > cp->ethdev->ctx_tbl_len)
return -ENOMEM;
cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
if (cp->ctx_arr == NULL)
return -ENOMEM;
cp->ctx_blks = blks;
cp->ctx_blk_size = ctx_blk_size;
if (!BNX2X_CHIP_IS_57710(cp->chip_id))
cp->ctx_align = 0;
else
cp->ctx_align = ctx_blk_size;
cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
for (i = 0; i < blks; i++) {
cp->ctx_arr[i].ctx =
dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
&cp->ctx_arr[i].mapping,
GFP_KERNEL);
if (cp->ctx_arr[i].ctx == NULL)
return -ENOMEM;
if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
cnic_free_context(dev);
cp->ctx_blk_size += cp->ctx_align;
i = -1;
continue;
}
}
}
return 0;
}
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
u32 start_cid = ethdev->starting_cid;
int i, j, n, ret, pages;
struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
cp->iro_arr = ethdev->iro_arr;
cp->max_cid_space = MAX_ISCSI_TBL_SZ;
cp->iscsi_start_cid = start_cid;
cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
cp->max_cid_space += dev->max_fcoe_conn;
cp->fcoe_init_cid = ethdev->fcoe_init_cid;
if (!cp->fcoe_init_cid)
cp->fcoe_init_cid = 0x10;
}
cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
GFP_KERNEL);
if (!cp->iscsi_tbl)
goto error;
cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
cp->max_cid_space, GFP_KERNEL);
if (!cp->ctx_tbl)
goto error;
for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
}
for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
PAGE_SIZE;
ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		goto error;
n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
for (i = 0, j = 0; i < cp->max_cid_space; i++) {
long off = CNIC_KWQ16_DATA_SIZE * (i % n);
cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
off;
if ((i % n) == (n - 1))
j++;
}
ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
if (ret)
goto error;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
if (ret)
goto error;
}
pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
if (ret)
goto error;
ret = cnic_alloc_bnx2x_context(dev);
if (ret)
goto error;
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
cp->l2_rx_ring_size = 15;
ret = cnic_alloc_uio_rings(dev, 4);
if (ret)
goto error;
ret = cnic_init_uio(dev);
if (ret)
goto error;
return 0;
error:
cnic_free_resc(dev);
return -ENOMEM;
}
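/* Number of free slots in the kernel work queue; producer and consumer
 * indices are masked by max_kwq_idx (ring size - 1).
 */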
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
return cp->max_kwq_idx -
((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num_wqes)
{
struct cnic_local *cp = dev->cnic_priv;
struct kwqe *prod_qe;
u16 prod, sw_prod, i;
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2 is down */
spin_lock_bh(&cp->cnic_ulp_lock);
if (num_wqes > cnic_kwq_avail(cp) &&
!test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
spin_unlock_bh(&cp->cnic_ulp_lock);
return -EAGAIN;
}
clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
prod = cp->kwq_prod_idx;
sw_prod = prod & MAX_KWQ_IDX;
for (i = 0; i < num_wqes; i++) {
prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
prod++;
sw_prod = prod & MAX_KWQ_IDX;
}
cp->kwq_prod_idx = prod;
CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
spin_unlock_bh(&cp->cnic_ulp_lock);
return 0;
}
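/* Each connection owns a small DMA buffer carved out of
 * kwq_16_data_info at allocation time.  Return its CPU address and
 * record the bus address in the 16-byte work queue element.
 */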
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
union l5cm_specific_data *l5_data)
{
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
dma_addr_t map;
map = ctx->kwqe_data_mapping;
l5_data->phy_address.lo = (u64) map & 0xffffffff;
l5_data->phy_address.hi = (u64) map >> 32;
return ctx->kwqe_data;
}
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
u32 type, union l5cm_specific_data *l5_data)
{
struct cnic_local *cp = dev->cnic_priv;
struct l5cm_spe kwqe;
struct kwqe_16 *kwq[1];
u16 type_16;
int ret;
kwqe.hdr.conn_and_cmd_data =
cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
BNX2X_HW_CID(cp, cid)));
type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
SPE_HDR_FUNCTION_ID;
kwqe.hdr.type = cpu_to_le16(type_16);
kwqe.hdr.reserved1 = 0;
kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
kwq[0] = (struct kwqe_16 *) &kwqe;
spin_lock_bh(&cp->cnic_ulp_lock);
ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
spin_unlock_bh(&cp->cnic_ulp_lock);
if (ret == 1)
return 0;
return -EBUSY;
}
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
struct kcqe *cqes[], u32 num_cqes)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_ulp_ops *ulp_ops;
rcu_read_lock();
ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
if (likely(ulp_ops)) {
ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
cqes, num_cqes);
}
rcu_read_unlock();
}
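/* Handle the iSCSI INIT1 work request: size the per-connection task
 * array, R2T queue and HQ from the ULP's parameters, then program the
 * global iSCSI settings into the T/U/X/C-storm internal RAM.
 */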
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
int hq_bds, pages;
u32 pfid = cp->pfid;
cp->num_iscsi_tasks = req1->num_tasks_per_conn;
cp->num_ccells = req1->num_ccells_per_conn;
cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
cp->num_iscsi_tasks;
cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
BNX2X_ISCSI_R2TQE_SIZE;
cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
cp->num_cqs = req1->num_cqs;
if (!dev->max_iscsi_conn)
return 0;
/* init Tstorm RAM */
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
/* init Ustorm RAM */
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
req1->rq_buffer_size);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
req1->cq_num_wqes);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
/* init Xstorm RAM */
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
hq_bds);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
req1->num_tasks_per_conn);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
/* init Cstorm RAM */
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
req1->cq_num_wqes);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
hq_bds);
return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
struct cnic_local *cp = dev->cnic_priv;
u32 pfid = cp->pfid;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
memset(&kcqe, 0, sizeof(kcqe));
if (!dev->max_iscsi_conn) {
kcqe.completion_status =
ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
goto done;
}
CNIC_WR(dev, BAR_TSTRORM_INTMEM +