/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "cnic.h"
#include "cnic_defs.h"

#define DRV_MODULE_NAME		"cnic"
static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);
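/*
 * Editor's sketch of the rule above (illustration only, not driver
 * code): since writers take both rtnl and cnic_dev_lock, a reader
 * holding either one sees a stable list.  A writer would do:
 *
 *	rtnl_lock();
 *	write_lock(&cnic_dev_lock);
 *	list_add(&new_dev->list, &cnic_dev_list);
 *	write_unlock(&cnic_dev_lock);
 *	rtnl_unlock();
 */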
static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return 0;
}
static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
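/*
 * Editor's note: the SK_F_OFFLD_SCHED bit doubles as a simple bit lock
 * between the offload path and the close/abort paths above.
 * cnic_offld_prep() takes it with test_and_set_bit() and backs off if
 * it was already held, while cnic_close_prep() and cnic_abort_prep()
 * spin (with msleep()) until they can take it, so an in-flight offload
 * is never torn down concurrently.
 */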
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	return 0;
}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}

	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return -1;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
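/*
 * Usage sketch (editor's illustration, not called anywhere in this
 * file): IDs are handed out round-robin from the bitmap, and the
 * "& (id_tbl->max - 1)" wrap assumes the table size is a power of two.
 *
 *	u32 id = cnic_alloc_new_id(&cp->cid_tbl);
 *
 *	if (id != -1) {
 *		... use id ...
 *		cnic_free_id(&cp->cid_tbl, id);
 *	}
 */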
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
}
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
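/*
 * Editor's example: for a page mapped at DMA address 0x0000000123456000,
 * cnic_setup_page_tbl() emits the two-word entry as
 * { 0x00000001, 0x23456000 } (high word first, the ordering the bnx2
 * chip expects), while cnic_setup_page_tbl_le() emits
 * { 0x23456000, 0x00000001 } for the bnx2x family.
 */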
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
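/*
 * Editor's note on the page-table sizing above: each entry is two
 * 32-bit words (8 bytes), and the total is rounded up to a whole
 * BCM_PAGE_SIZE.  E.g. with 4K pages, 100 pages need 800 bytes of
 * table, which rounds up to a single 4096-byte page.
 */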
static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}
static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->conn_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}
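/*
 * Editor's example: on bnx2x the last KCQE slot of every page holds the
 * next-page pointer (see cnic_alloc_kcq() below), so the index must skip
 * it.  Assuming MAX_KCQE_CNT is 127 (128 slots per page, one reserved),
 * cnic_bnx2x_next_idx(126) returns 128, jumping over slot 127.
 */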
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;

	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		goto err_udev;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		goto err_dma;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

err_dma:
	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
			  udev->l2_ring, udev->l2_ring_map);
err_udev:
	kfree(udev);
	return -ENOMEM;
}
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = dev->netdev->base_addr;
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
				     PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
				     PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	if (start_cid < BNX2X_ISCSI_START_CID) {
		u32 delta = BNX2X_ISCSI_START_CID - start_cid;

		cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
		cp->fcoe_start_cid += delta;
		cp->max_cid_space += delta;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, false);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
			   BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
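/*
 * Editor's worked example: with max_kwq_idx = 255, kwq_prod_idx = 10
 * and kwq_con_idx = 250, (10 - 250) & 255 = 16 entries are in flight,
 * so cnic_kwq_avail() reports 255 - 16 = 239 free slots.  The mask
 * arithmetic assumes the queue size is a power of two.
 */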
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return -EBUSY;
}
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}
static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;

	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);

	return 0;
}
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return ret;
}
static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

	return ret;
}
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);
	clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
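	/*
	 * Editor's note: the address words were copied into full in6_addr
	 * buffers above (for IPv4 the upper words stay zero), so
	 * csum_ipv6_magic() over a zero-length, zero-checksum TCP segment
	 * yields the pseudo-header seed for either address family; it is
	 * inverted and byte-swapped into the form the xstorm firmware
	 * consumes.
	 */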
	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 3,
		 mac[0]);
}
static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;

		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}
static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
				  req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}

static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
	fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
	fcoe_init->eq_next_page_addr.lo =
		cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
	fcoe_init->eq_next_page_addr.hi =
		(u64) cp->kcq2.dma.pg_map_arr[1] >> 32;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	*work = 3;
	return ret;
}
static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret = 0;
	u32 cid = -1, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kwqe_conn_offload1 *req1;
	struct fcoe_kwqe_conn_offload2 *req2;
	struct fcoe_kwqe_conn_offload3 *req3;
	struct fcoe_kwqe_conn_offload4 *req4;
	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
	struct cnic_context *ctx;
	struct fcoe_context *fctx;
	struct regpair ctx_addr;
	union l5cm_specific_data l5_data;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	if (num < 4) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];

	*work = 4;

	l5_cid = req1->fcoe_conn_id;
	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
		goto err_reply;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto err_reply;

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		ret = 0;
		goto err_reply;
	}
	cid = ctx->cid;

	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
	if (fctx) {
		u32 hw_cid = BNX2X_HW_CID(cp, cid);
		u32 val;

		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->xstorm_ag_context.cdu_reserved = val;
		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->ustorm_ag_context.cdu_usage = val;
	}
	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_offload size too big\n");
		goto err_reply;
	}
	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_offload)
		goto err_reply;

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));

	cid = BNX2X_HW_CID(cp, cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;

err_reply:
	if (cid != -1)
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}
2350 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2352 struct fcoe_kwqe_conn_enable_disable *req;
2353 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2354 union l5cm_specific_data l5_data;
2357 struct cnic_local *cp = dev->cnic_priv;
2359 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2360 cid = req->context_id;
2361 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2363 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2364 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2367 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2371 memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2372 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2373 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2374 FCOE_CONNECTION_TYPE, &l5_data);
2378 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2380 struct fcoe_kwqe_conn_enable_disable *req;
2381 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2382 union l5cm_specific_data l5_data;
2385 struct cnic_local *cp = dev->cnic_priv;
2387 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2388 cid = req->context_id;
2389 l5_cid = req->conn_id;
2390 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2393 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2395 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2396 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2399 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2403 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2404 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2405 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2406 FCOE_CONNECTION_TYPE, &l5_data);
2410 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2412 struct fcoe_kwqe_conn_destroy *req;
2413 union l5cm_specific_data l5_data;
2416 struct cnic_local *cp = dev->cnic_priv;
2417 struct cnic_context *ctx;
2418 struct fcoe_kcqe kcqe;
2419 struct kcqe *cqes[1];
2421 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2422 cid = req->context_id;
2423 l5_cid = req->conn_id;
2424 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2427 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2429 ctx = &cp->ctx_tbl[l5_cid];
2431 init_waitqueue_head(&ctx->waitq);
2434 memset(&l5_data, 0, sizeof(l5_data));
2435 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2436 FCOE_CONNECTION_TYPE, &l5_data);
2438 wait_event(ctx->waitq, ctx->wait_cond);
2439 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2440 queue_delayed_work(cnic_wq, &cp->delete_task,
2441 msecs_to_jiffies(2000));
2444 memset(&kcqe, 0, sizeof(kcqe));
2445 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2446 kcqe.fcoe_conn_id = req->conn_id;
2447 kcqe.fcoe_conn_context_id = cid;
2449 cqes[0] = (struct kcqe *) &kcqe;
2450 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2454 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2456 struct fcoe_kwqe_destroy *req;
2457 union l5cm_specific_data l5_data;
2458 struct cnic_local *cp = dev->cnic_priv;
2462 req = (struct fcoe_kwqe_destroy *) kwqe;
2463 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2465 memset(&l5_data, 0, sizeof(l5_data));
2466 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
2467 FCOE_CONNECTION_TYPE, &l5_data);
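/* Dispatch a batch of iSCSI/L4 KWQEs to the per-opcode handlers.
 * Handlers that consume more than one KWQE (OFFLOAD_CONN1, CONNECT1)
 * report the number consumed through 'work' so the loop can skip past
 * the entries they used.
 */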
2471 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2472 struct kwqe *wqes[], u32 num_wqes)
2478 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2479 return -EAGAIN; /* bnx2x is down */
2481 for (i = 0; i < num_wqes; ) {
2483 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2487 case ISCSI_KWQE_OPCODE_INIT1:
2488 ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2490 case ISCSI_KWQE_OPCODE_INIT2:
2491 ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2493 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2494 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2495 num_wqes - i, &work);
2497 case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2498 ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2500 case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2501 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2503 case L4_KWQE_OPCODE_VALUE_CONNECT1:
2504 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2507 case L4_KWQE_OPCODE_VALUE_CLOSE:
2508 ret = cnic_bnx2x_close(dev, kwqe);
2510 case L4_KWQE_OPCODE_VALUE_RESET:
2511 ret = cnic_bnx2x_reset(dev, kwqe);
2513 case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2514 ret = cnic_bnx2x_offload_pg(dev, kwqe);
2516 case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2517 ret = cnic_bnx2x_update_pg(dev, kwqe);
2519 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2524 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2529 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2536 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2537 struct kwqe *wqes[], u32 num_wqes)
2539 struct cnic_local *cp = dev->cnic_priv;
2544 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2545 return -EAGAIN; /* bnx2x is down */
2547 if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
2550 for (i = 0; i < num_wqes; ) {
2552 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2556 case FCOE_KWQE_OPCODE_INIT1:
2557 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2558 num_wqes - i, &work);
2560 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2561 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2562 num_wqes - i, &work);
2564 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2565 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2567 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2568 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2570 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2571 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2573 case FCOE_KWQE_OPCODE_DESTROY:
2574 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2576 case FCOE_KWQE_OPCODE_STAT:
2577 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2581 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2586 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2593 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2599 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2600 return -EAGAIN; /* bnx2x is down */
2605 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2606 switch (layer_code) {
2607 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2608 case KWQE_FLAGS_LAYER_MASK_L4:
2609 case KWQE_FLAGS_LAYER_MASK_L2:
2610 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2613 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2614 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
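/* FCoE TERMINATE_CONN completions are remapped to the L4 layer mask so
 * that service_kcqes() routes them to the connection manager, which
 * wakes the waiter in cnic_bnx2x_fcoe_destroy().
 */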
2620 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2622 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2623 return KCQE_FLAGS_LAYER_MASK_L4;
2625 return opflag & KCQE_FLAGS_LAYER_MASK;
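/* Hand completed KCQEs to their ULPs.  Consecutive entries that share
 * a layer mask are batched into a single indicate_kcqes() call, and
 * ramrod completions are counted so the slow-path queue credits can be
 * returned via cnic_spq_completion().
 */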
2628 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2630 struct cnic_local *cp = dev->cnic_priv;
2636 struct cnic_ulp_ops *ulp_ops;
2638 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2639 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2641 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2644 while (j < num_cqes) {
2645 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2647 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2650 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2655 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2656 ulp_type = CNIC_ULP_RDMA;
2657 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2658 ulp_type = CNIC_ULP_ISCSI;
2659 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2660 ulp_type = CNIC_ULP_FCOE;
2661 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2662 ulp_type = CNIC_ULP_L4;
2663 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2666 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2672 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2673 if (likely(ulp_ops)) {
2674 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2675 cp->completed_kcq + i, j);
2684 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
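/* Collect completed KCQEs from 'info' into cp->completed_kcq[].  The
 * scan stops at the hardware producer or at MAX_COMPLETED_KCQE, and
 * only entries up to the last one without KCQE_FLAGS_NEXT are counted,
 * so a multi-KCQE message is never delivered half-finished.
 */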
2687 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2689 struct cnic_local *cp = dev->cnic_priv;
2690 u16 i, ri, hw_prod, last;
2692 int kcqe_cnt = 0, last_cnt = 0;
2694 i = ri = last = info->sw_prod_idx;
2696 hw_prod = *info->hw_prod_idx_ptr;
2697 hw_prod = info->hw_idx(hw_prod);
2699 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2700 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2701 cp->completed_kcq[kcqe_cnt++] = kcqe;
2702 i = info->next_idx(i);
2703 ri = i & MAX_KCQ_IDX;
2704 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2705 last_cnt = kcqe_cnt;
2710 info->sw_prod_idx = last;
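/* On bnx2x, peek at the L2 completion ring for CLIENT_SETUP / HALT
 * ramrod completions.  cnic_chk_pkt_rings() uses this to decide when
 * the UIO rings have finished starting or stopping.
 */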
2714 static int cnic_l2_completion(struct cnic_local *cp)
2716 u16 hw_cons, sw_cons;
2717 struct cnic_uio_dev *udev = cp->udev;
2718 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2719 (udev->l2_ring + (2 * BCM_PAGE_SIZE));
2723 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2726 hw_cons = *cp->rx_cons_ptr;
2727 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2730 sw_cons = cp->rx_cons;
2731 while (sw_cons != hw_cons) {
2734 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2735 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2736 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2737 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2738 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2739 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2740 cmd == RAMROD_CMD_ID_ETH_HALT)
2743 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
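/* Kick the userspace UIO handler whenever the L2 consumer indices
 * move.  While a ring transition is pending (CNIC_LCL_FL_L2_WAIT), the
 * ramrod completion must also be seen before the wait flag is cleared.
 */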
2748 static void cnic_chk_pkt_rings(struct cnic_local *cp)
2750 u16 rx_cons, tx_cons;
2753 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2756 rx_cons = *cp->rx_cons_ptr;
2757 tx_cons = *cp->tx_cons_ptr;
2758 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2759 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2760 comp = cnic_l2_completion(cp);
2762 cp->tx_cons = tx_cons;
2763 cp->rx_cons = rx_cons;
2766 uio_event_notify(&cp->udev->cnic_uinfo);
2769 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2772 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2774 struct cnic_local *cp = dev->cnic_priv;
2775 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2778 /* status block index must be read before reading other fields */
2780 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2782 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2784 service_kcqes(dev, kcqe_cnt);
2786 /* Tell compiler that status_blk fields can change. */
2788 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2789 /* status block index must be read first */
2791 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2794 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2796 cnic_chk_pkt_rings(cp);
2801 static int cnic_service_bnx2(void *data, void *status_blk)
2803 struct cnic_dev *dev = data;
2805 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2806 struct status_block *sblk = status_blk;
2808 return sblk->status_idx;
2811 return cnic_service_bnx2_queues(dev);
2814 static void cnic_service_bnx2_msix(unsigned long data)
2816 struct cnic_dev *dev = (struct cnic_dev *) data;
2817 struct cnic_local *cp = dev->cnic_priv;
2819 cp->last_status_idx = cnic_service_bnx2_queues(dev);
2821 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2822 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
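/* Common interrupt entry: prefetch the status block and the next
 * expected KCQ entry, then defer the real work to the tasklet.
 */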
2825 static void cnic_doirq(struct cnic_dev *dev)
2827 struct cnic_local *cp = dev->cnic_priv;
2829 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2830 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2832 prefetch(cp->status_blk.gen);
2833 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2835 tasklet_schedule(&cp->cnic_irq_task);
2839 static irqreturn_t cnic_irq(int irq, void *dev_instance)
2841 struct cnic_dev *dev = dev_instance;
2842 struct cnic_local *cp = dev->cnic_priv;
2852 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
2853 u16 index, u8 op, u8 update)
2855 struct cnic_local *cp = dev->cnic_priv;
2856 u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
2857 COMMAND_REG_INT_ACK);
2858 struct igu_ack_register igu_ack;
2860 igu_ack.status_block_index = index;
2861 igu_ack.sb_id_and_flags =
2862 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
2863 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
2864 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
2865 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
2867 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
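/* E1/E1H status blocks are acked through the HC command register
 * above; E2 uses the IGU mailbox below, addressed directly in
 * BAR_IGU_INTMEM.
 */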
2870 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
2871 u16 index, u8 op, u8 update)
2873 struct igu_regular cmd_data;
2874 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
2876 cmd_data.sb_id_and_flags =
2877 (index << IGU_REGULAR_SB_INDEX_SHIFT) |
2878 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
2879 (update << IGU_REGULAR_BUPDATE_SHIFT) |
2880 (op << IGU_REGULAR_ENABLE_INT_SHIFT);
2883 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
2886 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
2888 struct cnic_local *cp = dev->cnic_priv;
2890 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
2891 IGU_INT_DISABLE, 0);
2894 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
2896 struct cnic_local *cp = dev->cnic_priv;
2898 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
2899 IGU_INT_DISABLE, 0);
2902 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2904 u32 last_status = *info->status_idx_ptr;
2907 /* status block index must be read before reading the KCQ */
2909 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
2911 service_kcqes(dev, kcqe_cnt);
2913 /* Tell compiler that sblk fields can change. */
2916 last_status = *info->status_idx_ptr;
2917 /* status block index must be read before reading the KCQ */
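/* Bottom half for bnx2x: drain kcq1 (and kcq2 on E2), then re-arm the
 * status block.  On E2 the re-arm is deferred until both queues report
 * the same status index; otherwise the queues are scanned again.
 */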
2923 static void cnic_service_bnx2x_bh(unsigned long data)
2925 struct cnic_dev *dev = (struct cnic_dev *) data;
2926 struct cnic_local *cp = dev->cnic_priv;
2927 u32 status_idx, new_status_idx;
2929 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2933 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2935 CNIC_WR16(dev, cp->kcq1.io_addr,
2936 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2938 if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
2939 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2940 status_idx, IGU_INT_ENABLE, 1);
2944 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
2946 if (new_status_idx != status_idx)
2949 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
2952 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2953 status_idx, IGU_INT_ENABLE, 1);
2959 static int cnic_service_bnx2x(void *data, void *status_blk)
2961 struct cnic_dev *dev = data;
2962 struct cnic_local *cp = dev->cnic_priv;
2964 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
2967 cnic_chk_pkt_rings(cp);
2972 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
2974 struct cnic_ulp_ops *ulp_ops;
2976 if (if_type == CNIC_ULP_ISCSI)
2977 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
2979 mutex_lock(&cnic_lock);
2980 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
2981 lockdep_is_held(&cnic_lock));
2983 mutex_unlock(&cnic_lock);
2986 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2987 mutex_unlock(&cnic_lock);
2989 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
2990 ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
2992 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
2995 static void cnic_ulp_stop(struct cnic_dev *dev)
2997 struct cnic_local *cp = dev->cnic_priv;
3000 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3001 cnic_ulp_stop_one(cp, if_type);
3004 static void cnic_ulp_start(struct cnic_dev *dev)
3006 struct cnic_local *cp = dev->cnic_priv;
3009 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3010 struct cnic_ulp_ops *ulp_ops;
3012 mutex_lock(&cnic_lock);
3013 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3014 lockdep_is_held(&cnic_lock));
3015 if (!ulp_ops || !ulp_ops->cnic_start) {
3016 mutex_unlock(&cnic_lock);
3019 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3020 mutex_unlock(&cnic_lock);
3022 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3023 ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3025 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3029 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3031 struct cnic_dev *dev = data;
3033 switch (info->cmd) {
3034 case CNIC_CTL_STOP_CMD:
3042 case CNIC_CTL_START_CMD:
3045 if (!cnic_start_hw(dev))
3046 cnic_ulp_start(dev);
3050 case CNIC_CTL_STOP_ISCSI_CMD: {
3051 struct cnic_local *cp = dev->cnic_priv;
3052 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3053 queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3056 case CNIC_CTL_COMPLETION_CMD: {
3057 u32 cid = BNX2X_SW_CID(info->data.comp.cid);
3059 struct cnic_local *cp = dev->cnic_priv;
3061 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3062 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3065 wake_up(&ctx->waitq);
3075 static void cnic_ulp_init(struct cnic_dev *dev)
3078 struct cnic_local *cp = dev->cnic_priv;
3080 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3081 struct cnic_ulp_ops *ulp_ops;
3083 mutex_lock(&cnic_lock);
3084 ulp_ops = cnic_ulp_tbl_prot(i);
3085 if (!ulp_ops || !ulp_ops->cnic_init) {
3086 mutex_unlock(&cnic_lock);
3090 mutex_unlock(&cnic_lock);
3092 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3093 ulp_ops->cnic_init(dev);
3099 static void cnic_ulp_exit(struct cnic_dev *dev)
3102 struct cnic_local *cp = dev->cnic_priv;
3104 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3105 struct cnic_ulp_ops *ulp_ops;
3107 mutex_lock(&cnic_lock);
3108 ulp_ops = cnic_ulp_tbl_prot(i);
3109 if (!ulp_ops || !ulp_ops->cnic_exit) {
3110 mutex_unlock(&cnic_lock);
3114 mutex_unlock(&cnic_lock);
3116 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3117 ulp_ops->cnic_exit(dev);
3123 static int cnic_cm_offload_pg(struct cnic_sock *csk)
3125 struct cnic_dev *dev = csk->dev;
3126 struct l4_kwq_offload_pg *l4kwqe;
3127 struct kwqe *wqes[1];
3129 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3130 memset(l4kwqe, 0, sizeof(*l4kwqe));
3131 wqes[0] = (struct kwqe *) l4kwqe;
3133 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3135 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3136 l4kwqe->l2hdr_nbytes = ETH_HLEN;
3138 l4kwqe->da0 = csk->ha[0];
3139 l4kwqe->da1 = csk->ha[1];
3140 l4kwqe->da2 = csk->ha[2];
3141 l4kwqe->da3 = csk->ha[3];
3142 l4kwqe->da4 = csk->ha[4];
3143 l4kwqe->da5 = csk->ha[5];
3145 l4kwqe->sa0 = dev->mac_addr[0];
3146 l4kwqe->sa1 = dev->mac_addr[1];
3147 l4kwqe->sa2 = dev->mac_addr[2];
3148 l4kwqe->sa3 = dev->mac_addr[3];
3149 l4kwqe->sa4 = dev->mac_addr[4];
3150 l4kwqe->sa5 = dev->mac_addr[5];
3152 l4kwqe->etype = ETH_P_IP;
3153 l4kwqe->ipid_start = DEF_IPID_START;
3154 l4kwqe->host_opaque = csk->l5_cid;
3157 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3158 l4kwqe->vlan_tag = csk->vlan_id;
3159 l4kwqe->l2hdr_nbytes += 4;
3162 return dev->submit_kwqes(dev, wqes, 1);
3165 static int cnic_cm_update_pg(struct cnic_sock *csk)
3167 struct cnic_dev *dev = csk->dev;
3168 struct l4_kwq_update_pg *l4kwqe;
3169 struct kwqe *wqes[1];
3171 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3172 memset(l4kwqe, 0, sizeof(*l4kwqe));
3173 wqes[0] = (struct kwqe *) l4kwqe;
3175 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3177 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3178 l4kwqe->pg_cid = csk->pg_cid;
3180 l4kwqe->da0 = csk->ha[0];
3181 l4kwqe->da1 = csk->ha[1];
3182 l4kwqe->da2 = csk->ha[2];
3183 l4kwqe->da3 = csk->ha[3];
3184 l4kwqe->da4 = csk->ha[4];
3185 l4kwqe->da5 = csk->ha[5];
3187 l4kwqe->pg_host_opaque = csk->l5_cid;
3188 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3190 return dev->submit_kwqes(dev, wqes, 1);
3193 static int cnic_cm_upload_pg(struct cnic_sock *csk)
3195 struct cnic_dev *dev = csk->dev;
3196 struct l4_kwq_upload *l4kwqe;
3197 struct kwqe *wqes[1];
3199 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3200 memset(l4kwqe, 0, sizeof(*l4kwqe));
3201 wqes[0] = (struct kwqe *) l4kwqe;
3203 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3205 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3206 l4kwqe->cid = csk->pg_cid;
3208 return dev->submit_kwqes(dev, wqes, 1);
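/* Build the TCP connect request.  IPv4 uses two linked KWQEs
 * (CONNECT1 + CONNECT3); IPv6 inserts CONNECT2 with the remaining
 * address words.  The MSS is derived from the path MTU minus the IP
 * and TCP header sizes.
 */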
3211 static int cnic_cm_conn_req(struct cnic_sock *csk)
3213 struct cnic_dev *dev = csk->dev;
3214 struct l4_kwq_connect_req1 *l4kwqe1;
3215 struct l4_kwq_connect_req2 *l4kwqe2;
3216 struct l4_kwq_connect_req3 *l4kwqe3;
3217 struct kwqe *wqes[3];
3221 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3222 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3223 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3224 memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3225 memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3226 memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3228 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3230 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3231 l4kwqe3->ka_timeout = csk->ka_timeout;
3232 l4kwqe3->ka_interval = csk->ka_interval;
3233 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3234 l4kwqe3->tos = csk->tos;
3235 l4kwqe3->ttl = csk->ttl;
3236 l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3237 l4kwqe3->pmtu = csk->mtu;
3238 l4kwqe3->rcv_buf = csk->rcv_buf;
3239 l4kwqe3->snd_buf = csk->snd_buf;
3240 l4kwqe3->seed = csk->seed;
3242 wqes[0] = (struct kwqe *) l4kwqe1;
3243 if (test_bit(SK_F_IPV6, &csk->flags)) {
3244 wqes[1] = (struct kwqe *) l4kwqe2;
3245 wqes[2] = (struct kwqe *) l4kwqe3;
3248 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3249 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3251 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3252 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3253 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3254 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3255 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3256 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3257 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3258 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3259 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3260 sizeof(struct tcphdr);
3262 wqes[1] = (struct kwqe *) l4kwqe3;
3263 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3264 sizeof(struct tcphdr);
3267 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3269 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3270 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3271 l4kwqe1->cid = csk->cid;
3272 l4kwqe1->pg_cid = csk->pg_cid;
3273 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3274 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3275 l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3276 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3277 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3278 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3279 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3280 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3281 if (csk->tcp_flags & SK_TCP_NAGLE)
3282 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3283 if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3284 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3285 if (csk->tcp_flags & SK_TCP_SACK)
3286 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3287 if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3288 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3290 l4kwqe1->tcp_flags = tcp_flags;
3292 return dev->submit_kwqes(dev, wqes, num_wqes);
3295 static int cnic_cm_close_req(struct cnic_sock *csk)
3297 struct cnic_dev *dev = csk->dev;
3298 struct l4_kwq_close_req *l4kwqe;
3299 struct kwqe *wqes[1];
3301 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3302 memset(l4kwqe, 0, sizeof(*l4kwqe));
3303 wqes[0] = (struct kwqe *) l4kwqe;
3305 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3306 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3307 l4kwqe->cid = csk->cid;
3309 return dev->submit_kwqes(dev, wqes, 1);
3312 static int cnic_cm_abort_req(struct cnic_sock *csk)
3314 struct cnic_dev *dev = csk->dev;
3315 struct l4_kwq_reset_req *l4kwqe;
3316 struct kwqe *wqes[1];
3318 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3319 memset(l4kwqe, 0, sizeof(*l4kwqe));
3320 wqes[0] = (struct kwqe *) l4kwqe;
3322 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3323 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3324 l4kwqe->cid = csk->cid;
3326 return dev->submit_kwqes(dev, wqes, 1);
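/* Allocate the connection-manager socket for l5_cid.  The slot is
 * handed out only when nothing holds a reference and SK_F_INUSE was
 * clear; keep-alive, TOS, TTL and buffer parameters start from the
 * driver defaults.
 */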
3329 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3330 u32 l5_cid, struct cnic_sock **csk, void *context)
3332 struct cnic_local *cp = dev->cnic_priv;
3333 struct cnic_sock *csk1;
3335 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3339 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3341 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3345 csk1 = &cp->csk_tbl[l5_cid];
3346 if (atomic_read(&csk1->ref_count))
3349 if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3354 csk1->l5_cid = l5_cid;
3355 csk1->ulp_type = ulp_type;
3356 csk1->context = context;
3358 csk1->ka_timeout = DEF_KA_TIMEOUT;
3359 csk1->ka_interval = DEF_KA_INTERVAL;
3360 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3361 csk1->tos = DEF_TOS;
3362 csk1->ttl = DEF_TTL;
3363 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3364 csk1->rcv_buf = DEF_RCV_BUF;
3365 csk1->snd_buf = DEF_SND_BUF;
3366 csk1->seed = DEF_SEED;
3372 static void cnic_cm_cleanup(struct cnic_sock *csk)
3374 if (csk->src_port) {
3375 struct cnic_dev *dev = csk->dev;
3376 struct cnic_local *cp = dev->cnic_priv;
3378 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3383 static void cnic_close_conn(struct cnic_sock *csk)
3385 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3386 cnic_cm_upload_pg(csk);
3387 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3389 cnic_cm_cleanup(csk);
3392 static int cnic_cm_destroy(struct cnic_sock *csk)
3394 if (!cnic_in_use(csk))
3398 clear_bit(SK_F_INUSE, &csk->flags);
3399 smp_mb__after_clear_bit();
3400 while (atomic_read(&csk->ref_count) != 1)
3402 cnic_cm_cleanup(csk);
3409 static inline u16 cnic_get_vlan(struct net_device *dev,
3410 struct net_device **vlan_dev)
3412 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3413 *vlan_dev = vlan_dev_real_dev(dev);
3414 return vlan_dev_vlan_id(dev);
3420 static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3421 struct dst_entry **dst)
3423 #if defined(CONFIG_INET)
3426 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3433 return -ENETUNREACH;
3437 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3438 struct dst_entry **dst)
3440 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
3443 memset(&fl6, 0, sizeof(fl6));
3444 ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
3445 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3446 fl6.flowi6_oif = dst_addr->sin6_scope_id;
3448 *dst = ip6_route_output(&init_net, NULL, &fl6);
3453 return -ENETUNREACH;
3456 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3459 struct cnic_dev *dev = NULL;
3460 struct dst_entry *dst;
3461 struct net_device *netdev = NULL;
3462 int err = -ENETUNREACH;
3464 if (dst_addr->sin_family == AF_INET)
3465 err = cnic_get_v4_route(dst_addr, &dst);
3466 else if (dst_addr->sin_family == AF_INET6) {
3467 struct sockaddr_in6 *dst_addr6 =
3468 (struct sockaddr_in6 *) dst_addr;
3470 err = cnic_get_v6_route(dst_addr6, &dst);
3480 cnic_get_vlan(dst->dev, &netdev);
3482 dev = cnic_from_netdev(netdev);
3491 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3493 struct cnic_dev *dev = csk->dev;
3494 struct cnic_local *cp = dev->cnic_priv;
3496 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
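/* Resolve the route for the requested destination and fill in the csk
 * addressing state: destination IP/port, VLAN and MTU taken from the
 * dst entry when it maps back to our netdev, and a local port reserved
 * in csk_port_tbl (the caller's port if it falls inside
 * [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX), otherwise a fresh one).
 */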
3499 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3501 struct cnic_dev *dev = csk->dev;
3502 struct cnic_local *cp = dev->cnic_priv;
3504 struct dst_entry *dst = NULL;
3505 struct net_device *realdev;
3509 if (saddr->local.v6.sin6_family == AF_INET6 &&
3510 saddr->remote.v6.sin6_family == AF_INET6)
3512 else if (saddr->local.v4.sin_family == AF_INET &&
3513 saddr->remote.v4.sin_family == AF_INET)
3518 clear_bit(SK_F_IPV6, &csk->flags);
3521 set_bit(SK_F_IPV6, &csk->flags);
3522 cnic_get_v6_route(&saddr->remote.v6, &dst);
3524 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3525 sizeof(struct in6_addr));
3526 csk->dst_port = saddr->remote.v6.sin6_port;
3527 local_port = saddr->local.v6.sin6_port;
3530 cnic_get_v4_route(&saddr->remote.v4, &dst);
3532 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3533 csk->dst_port = saddr->remote.v4.sin_port;
3534 local_port = saddr->local.v4.sin_port;
3538 csk->mtu = dev->netdev->mtu;
3539 if (dst && dst->dev) {
3540 u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3541 if (realdev == dev->netdev) {
3542 csk->vlan_id = vlan;
3543 csk->mtu = dst_mtu(dst);
3547 port_id = be16_to_cpu(local_port);
3548 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3549 port_id < CNIC_LOCAL_PORT_MAX) {
3550 if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3556 port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3557 if (port_id == -1) {
3561 local_port = cpu_to_be16(port_id);
3563 csk->src_port = local_port;
3570 static void cnic_init_csk_state(struct cnic_sock *csk)
3573 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3574 clear_bit(SK_F_CLOSING, &csk->flags);
3577 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3579 struct cnic_local *cp = csk->dev->cnic_priv;
3582 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3585 if (!cnic_in_use(csk))
3588 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3591 cnic_init_csk_state(csk);
3593 err = cnic_get_route(csk, saddr);
3597 err = cnic_resolve_addr(csk, saddr);
3602 clear_bit(SK_F_CONNECT_START, &csk->flags);
3606 static int cnic_cm_abort(struct cnic_sock *csk)
3608 struct cnic_local *cp = csk->dev->cnic_priv;
3609 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3611 if (!cnic_in_use(csk))
3614 if (cnic_abort_prep(csk))
3615 return cnic_cm_abort_req(csk);
3617 /* Getting here means that we have not started connect, or the
3618 * connect was not successful.
3621 cp->close_conn(csk, opcode);
3622 if (csk->state != opcode)
3628 static int cnic_cm_close(struct cnic_sock *csk)
3630 if (!cnic_in_use(csk))
3633 if (cnic_close_prep(csk)) {
3634 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3635 return cnic_cm_close_req(csk);
3642 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3645 struct cnic_ulp_ops *ulp_ops;
3646 int ulp_type = csk->ulp_type;
3649 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3651 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3652 ulp_ops->cm_connect_complete(csk);
3653 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3654 ulp_ops->cm_close_complete(csk);
3655 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3656 ulp_ops->cm_remote_abort(csk);
3657 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3658 ulp_ops->cm_abort_complete(csk);
3659 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3660 ulp_ops->cm_remote_close(csk);
3665 static int cnic_cm_set_pg(struct cnic_sock *csk)
3667 if (cnic_offld_prep(csk)) {
3668 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3669 cnic_cm_update_pg(csk);
3671 cnic_cm_offload_pg(csk);
3676 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3678 struct cnic_local *cp = dev->cnic_priv;
3679 u32 l5_cid = kcqe->pg_host_opaque;
3680 u8 opcode = kcqe->op_code;
3681 struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3684 if (!cnic_in_use(csk))
3687 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3688 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3691 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3692 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3693 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3694 cnic_cm_upcall(cp, csk,
3695 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3699 csk->pg_cid = kcqe->pg_cid;
3700 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3701 cnic_cm_conn_req(csk);
3707 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3709 struct cnic_local *cp = dev->cnic_priv;
3710 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3711 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3712 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3714 ctx->timestamp = jiffies;
3716 wake_up(&ctx->waitq);
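/* Route one L4/L5CM KCQE to its socket.  FCoE TERMINATE_CONN is
 * special-cased first, PG offload completions next; everything else is
 * looked up by conn_id (or cid) in csk_tbl and drives the socket state
 * machine, with terminal events forwarded through cp->close_conn() or
 * the ULP upcall.
 */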
3719 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3721 struct cnic_local *cp = dev->cnic_priv;
3722 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3723 u8 opcode = l4kcqe->op_code;
3725 struct cnic_sock *csk;
3727 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3728 cnic_process_fcoe_term_conn(dev, kcqe);
3731 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3732 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3733 cnic_cm_process_offld_pg(dev, l4kcqe);
3737 l5_cid = l4kcqe->conn_id;
3739 l5_cid = l4kcqe->cid;
3740 if (l5_cid >= MAX_CM_SK_TBL_SZ)
3743 csk = &cp->csk_tbl[l5_cid];
3746 if (!cnic_in_use(csk)) {
3752 case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3753 if (l4kcqe->status != 0) {
3754 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3755 cnic_cm_upcall(cp, csk,
3756 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3759 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3760 if (l4kcqe->status == 0)
3761 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3763 smp_mb__before_clear_bit();
3764 clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3765 cnic_cm_upcall(cp, csk, opcode);
3768 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3769 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3770 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3771 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3772 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3773 cp->close_conn(csk, opcode);
3776 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3777 /* after we already sent CLOSE_REQ */
3778 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
3779 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
3780 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3781 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
3783 cnic_cm_upcall(cp, csk, opcode);
3789 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
3791 struct cnic_dev *dev = data;
3794 for (i = 0; i < num; i++)
3795 cnic_cm_process_kcqe(dev, kcqe[i]);
3798 static struct cnic_ulp_ops cm_ulp_ops = {
3799 .indicate_kcqes = cnic_cm_indicate_kcqe,
3802 static void cnic_cm_free_mem(struct cnic_dev *dev)
3804 struct cnic_local *cp = dev->cnic_priv;
3808 cnic_free_id_tbl(&cp->csk_port_tbl);
3811 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3813 struct cnic_local *cp = dev->cnic_priv;
3816 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
3821 get_random_bytes(&port_id, sizeof(port_id));
3822 port_id %= CNIC_LOCAL_PORT_RANGE;
3823 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
3824 CNIC_LOCAL_PORT_MIN, port_id)) {
3825 cnic_cm_free_mem(dev);
3831 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3833 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
3834 /* Unsolicited RESET_COMP or RESET_RECEIVED */
3835 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
3836 csk->state = opcode;
3839 /* 1. If event opcode matches the expected event in csk->state
3840 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any event
3842 * 3. If the expected event is 0, meaning the connection was never
3843 * established, we accept the opcode from cm_abort.
3845 if (opcode == csk->state || csk->state == 0 ||
3846 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
3847 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
3848 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3849 if (csk->state == 0)
3850 csk->state = opcode;
3857 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3859 struct cnic_dev *dev = csk->dev;
3860 struct cnic_local *cp = dev->cnic_priv;
3862 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
3863 cnic_cm_upcall(cp, csk, opcode);
3867 clear_bit(SK_F_CONNECT_START, &csk->flags);
3868 cnic_close_conn(csk);
3869 csk->state = opcode;
3870 cnic_cm_upcall(cp, csk, opcode);
3873 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
3877 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
3881 get_random_bytes(&seed, 4);
3882 cnic_ctx_wr(dev, 45, 0, seed);
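/* bnx2x teardown is a chain of ramrods.  A RESET/CLOSE completion
 * first triggers SEARCHER_DELETE when the PG was offloaded, whose
 * completion in turn triggers TERMINATE_OFFLOAD; only when the chain
 * finishes (close_complete) is the connection closed and the ULP
 * notified.
 */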
3886 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3888 struct cnic_dev *dev = csk->dev;
3889 struct cnic_local *cp = dev->cnic_priv;
3890 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
3891 union l5cm_specific_data l5_data;
3893 int close_complete = 0;
3896 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3897 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3898 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3899 if (cnic_ready_to_close(csk, opcode)) {
3900 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3901 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3906 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3907 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3909 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3914 memset(&l5_data, 0, sizeof(l5_data));
3916 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
3918 } else if (close_complete) {
3919 ctx->timestamp = jiffies;
3920 cnic_close_conn(csk);
3921 cnic_cm_upcall(cp, csk, csk->state);
3925 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
3927 struct cnic_local *cp = dev->cnic_priv;
3933 if (!netif_running(dev->netdev))
3936 for (i = 0; i < cp->max_cid_space; i++) {
3937 struct cnic_context *ctx = &cp->ctx_tbl[i];
3939 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
3942 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3943 netdev_warn(dev->netdev, "CID %x not deleted\n",
3947 cancel_delayed_work(&cp->delete_task);
3948 flush_workqueue(cnic_wq);
3950 if (atomic_read(&cp->iscsi_conn) != 0)
3951 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
3952 atomic_read(&cp->iscsi_conn));
3955 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
3957 struct cnic_local *cp = dev->cnic_priv;
3958 u32 pfid = cp->pfid;
3959 u32 port = CNIC_PORT(cp);
3961 cnic_init_bnx2x_mac(dev);
3962 cnic_bnx2x_set_tcp_timestamp(dev, 1);
3964 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
3965 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
3967 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3968 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
3969 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3970 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
3973 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3974 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
3975 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3976 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
3977 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
3978 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
3979 CNIC_WR(dev, BAR_XSTRORM_INTMEM +
3980 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
3982 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
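/* Delayed-work engine behind connection teardown.  A CID marked
 * CTX_FL_DELETE_WAIT is destroyed only after sitting idle for two
 * seconds past ctx->timestamp; anything not yet ripe causes the task
 * to re-queue itself 10 ms later.
 */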
3987 static void cnic_delete_task(struct work_struct *work)
3989 struct cnic_local *cp;
3990 struct cnic_dev *dev;
3992 int need_resched = 0;
3994 cp = container_of(work, struct cnic_local, delete_task.work);
3997 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
3998 struct drv_ctl_info info;
4000 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4002 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4003 cp->ethdev->drv_ctl(dev->netdev, &info);
4006 for (i = 0; i < cp->max_cid_space; i++) {
4007 struct cnic_context *ctx = &cp->ctx_tbl[i];
4009 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4010 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4013 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4018 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4021 cnic_bnx2x_destroy_ramrod(dev, i);
4023 cnic_free_bnx2x_conn_resc(dev, i);
4024 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4025 atomic_dec(&cp->iscsi_conn);
4027 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4031 queue_delayed_work(cnic_wq, &cp->delete_task,
4032 msecs_to_jiffies(10));
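/* cnic_cm_open() publishes the connection-manager entry points on the
 * cnic_dev.  A ULP such as bnx2i drives them roughly like this
 * (illustrative sketch only; 'ctx' and 'saddr' are caller-provided and
 * error handling is omitted):
 *
 *	struct cnic_sock *csk;
 *
 *	if (!dev->cm_create(dev, CNIC_ULP_ISCSI, cid, l5_cid, &csk, ctx))
 *		dev->cm_connect(csk, &saddr);
 *	...
 *	dev->cm_close(csk);
 *	dev->cm_destroy(csk);
 */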
4036 static int cnic_cm_open(struct cnic_dev *dev)
4038 struct cnic_local *cp = dev->cnic_priv;
4041 err = cnic_cm_alloc_mem(dev);
4045 err = cp->start_cm(dev);
4050 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4052 dev->cm_create = cnic_cm_create;
4053 dev->cm_destroy = cnic_cm_destroy;
4054 dev->cm_connect = cnic_cm_connect;
4055 dev->cm_abort = cnic_cm_abort;
4056 dev->cm_close = cnic_cm_close;
4057 dev->cm_select_dev = cnic_cm_select_dev;
4059 cp->ulp_handle[CNIC_ULP_L4] = dev;
4060 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4064 cnic_cm_free_mem(dev);
4068 static int cnic_cm_shutdown(struct cnic_dev *dev)
4070 struct cnic_local *cp = dev->cnic_priv;
4078 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4079 struct cnic_sock *csk = &cp->csk_tbl[i];
4081 clear_bit(SK_F_INUSE, &csk->flags);
4082 cnic_cm_cleanup(csk);
4084 cnic_cm_free_mem(dev);
4089 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4094 cid_addr = GET_CID_ADDR(cid);
4096 for (i = 0; i < CTX_SIZE; i += 4)
4097 cnic_ctx_wr(dev, cid_addr, i, 0);
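/* The 5709 keeps its context memory in host pages.  Each page is
 * zeroed, its DMA address is loaded through the HOST_PAGE_TBL
 * registers, and the WRITE_REQ bit is polled (up to ten reads) until
 * the chip has latched the entry.
 */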
4100 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4102 struct cnic_local *cp = dev->cnic_priv;
4104 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4106 if (CHIP_NUM(cp) != CHIP_NUM_5709)
4109 for (i = 0; i < cp->ctx_blks; i++) {
4111 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4114 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
4116 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4117 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4118 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4119 (u64) cp->ctx_arr[i].mapping >> 32);
4120 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4121 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4122 for (j = 0; j < 10; j++) {
4124 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4125 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4129 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4137 static void cnic_free_irq(struct cnic_dev *dev)
4139 struct cnic_local *cp = dev->cnic_priv;
4140 struct cnic_eth_dev *ethdev = cp->ethdev;
4142 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4143 cp->disable_int_sync(dev);
4144 tasklet_kill(&cp->cnic_irq_task);
4145 free_irq(ethdev->irq_arr[0].vector, dev);
4149 static int cnic_request_irq(struct cnic_dev *dev)
4151 struct cnic_local *cp = dev->cnic_priv;
4152 struct cnic_eth_dev *ethdev = cp->ethdev;
4155 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4157 tasklet_disable(&cp->cnic_irq_task);
4162 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4164 struct cnic_local *cp = dev->cnic_priv;
4165 struct cnic_eth_dev *ethdev = cp->ethdev;
4167 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4169 int sblk_num = cp->status_blk_num;
4170 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4171 BNX2_HC_SB_CONFIG_1;
4173 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4175 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4176 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4177 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4179 cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4180 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4181 (unsigned long) dev);
4182 err = cnic_request_irq(dev);
4186 while (cp->status_blk.bnx2->status_completion_producer_index &&
4188 CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4189 1 << (11 + sblk_num));
4194 if (cp->status_blk.bnx2->status_completion_producer_index) {
4200 struct status_block *sblk = cp->status_blk.gen;
4201 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4204 while (sblk->status_completion_producer_index && i < 10) {
4205 CNIC_WR(dev, BNX2_HC_COMMAND,
4206 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4211 if (sblk->status_completion_producer_index)
4218 netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4222 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4224 struct cnic_local *cp = dev->cnic_priv;
4225 struct cnic_eth_dev *ethdev = cp->ethdev;
4227 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4230 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4231 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4234 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4236 struct cnic_local *cp = dev->cnic_priv;
4237 struct cnic_eth_dev *ethdev = cp->ethdev;
4239 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4242 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4243 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4244 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4245 synchronize_irq(ethdev->irq_arr[0].vector);
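/* Program the L2 TX context for the UIO-exposed ring.  The 5709 (XI)
 * register offsets differ from the older chips, and every BD is
 * pre-filled with the address of the single UIO transmit buffer
 * (buf_map is not advanced in the loop).
 */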
4248 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4250 struct cnic_local *cp = dev->cnic_priv;
4251 struct cnic_eth_dev *ethdev = cp->ethdev;
4252 struct cnic_uio_dev *udev = cp->udev;
4253 u32 cid_addr, tx_cid, sb_id;
4254 u32 val, offset0, offset1, offset2, offset3;
4257 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4258 struct status_block *s_blk = cp->status_blk.gen;
4260 sb_id = cp->status_blk_num;
4262 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4263 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4264 struct status_block_msix *sblk = cp->status_blk.bnx2;
4266 tx_cid = TX_TSS_CID + sb_id - 1;
4267 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4269 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4271 cp->tx_cons = *cp->tx_cons_ptr;
4273 cid_addr = GET_CID_ADDR(tx_cid);
4274 if (CHIP_NUM(cp) == CHIP_NUM_5709) {
4275 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4277 for (i = 0; i < PHY_CTX_SIZE; i += 4)
4278 cnic_ctx_wr(dev, cid_addr2, i, 0);
4280 offset0 = BNX2_L2CTX_TYPE_XI;
4281 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4282 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4283 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4285 cnic_init_context(dev, tx_cid);
4286 cnic_init_context(dev, tx_cid + 1);
4288 offset0 = BNX2_L2CTX_TYPE;
4289 offset1 = BNX2_L2CTX_CMD_TYPE;
4290 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4291 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4293 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4294 cnic_ctx_wr(dev, cid_addr, offset0, val);
4296 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4297 cnic_ctx_wr(dev, cid_addr, offset1, val);
4299 txbd = (struct tx_bd *) udev->l2_ring;
4301 buf_map = udev->l2_buf_map;
4302 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
4303 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4304 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4306 val = (u64) ring_map >> 32;
4307 cnic_ctx_wr(dev, cid_addr, offset2, val);
4308 txbd->tx_bd_haddr_hi = val;
4310 val = (u64) ring_map & 0xffffffff;
4311 cnic_ctx_wr(dev, cid_addr, offset3, val);
4312 txbd->tx_bd_haddr_lo = val;
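/* Set up the L2 RX ring on CID 2.  A coalesce command is forced until
 * the hardware publishes a nonzero consumer index (up to ten tries),
 * the BDs cycle through the l2_rx_ring_size receive buffers, and a
 * flood bit is set in the RX processor's BNX2_RXP_SCRATCH_RXP_FLOOD
 * scratch register.
 */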
4315 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4317 struct cnic_local *cp = dev->cnic_priv;
4318 struct cnic_eth_dev *ethdev = cp->ethdev;
4319 struct cnic_uio_dev *udev = cp->udev;
4320 u32 cid_addr, sb_id, val, coal_reg, coal_val;
4323 struct status_block *s_blk = cp->status_blk.gen;
4324 dma_addr_t ring_map = udev->l2_ring_map;
4326 sb_id = cp->status_blk_num;
4327 cnic_init_context(dev, 2);
4328 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4329 coal_reg = BNX2_HC_COMMAND;
4330 coal_val = CNIC_RD(dev, coal_reg);
4331 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4332 struct status_block_msix *sblk = cp->status_blk.bnx2;
4334 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4335 coal_reg = BNX2_HC_COALESCE_NOW;
4336 coal_val = 1 << (11 + sb_id);
4339 while (*cp->rx_cons_ptr == 0 && i < 10) {
4340 CNIC_WR(dev, coal_reg, coal_val);
4345 cp->rx_cons = *cp->rx_cons_ptr;
4347 cid_addr = GET_CID_ADDR(2);
4348 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4349 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4350 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4353 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4355 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4356 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4358 rxbd = (struct rx_bd *) (udev->l2_ring + BCM_PAGE_SIZE);
4359 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
4361 int n = (i % cp->l2_rx_ring_size) + 1;
4363 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4364 rxbd->rx_bd_len = cp->l2_single_buf_size;
4365 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4366 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4367 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4369 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4370 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4371 rxbd->rx_bd_haddr_hi = val;
4373 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4374 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4375 rxbd->rx_bd_haddr_lo = val;
4377 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4378 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4381 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4383 struct kwqe *wqes[1], l2kwqe;
4385 memset(&l2kwqe, 0, sizeof(l2kwqe));
4387 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4388 (L2_KWQE_OPCODE_VALUE_FLUSH <<
4389 KWQE_OPCODE_SHIFT) | 2;
4390 dev->submit_kwqes(dev, wqes, 1);
4393 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4395 struct cnic_local *cp = dev->cnic_priv;
4398 val = cp->func << 2;
4400 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4402 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4403 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4404 dev->mac_addr[0] = (u8) (val >> 8);
4405 dev->mac_addr[1] = (u8) val;
4407 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4409 val = cnic_reg_rd_ind(dev, cp->shmem_base +
4410 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4411 dev->mac_addr[2] = (u8) (val >> 24);
4412 dev->mac_addr[3] = (u8) (val >> 16);
4413 dev->mac_addr[4] = (u8) (val >> 8);
4414 dev->mac_addr[5] = (u8) val;
4416 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4418 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4419 if (CHIP_NUM(cp) != CHIP_NUM_5709)
4420 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4422 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4423 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4424 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
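/* Bring up the bnx2 kernel queues: program the MQ page size, the HC
 * coalescing parameters and (on the 5709) the context pages, then lay
 * out the KWQ and KCQ contexts before opening the L2 rings and the
 * IRQ.
 */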
4427 static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4429 struct cnic_local *cp = dev->cnic_priv;
4430 struct cnic_eth_dev *ethdev = cp->ethdev;
4431 struct status_block *sblk = cp->status_blk.gen;
4432 u32 val, kcq_cid_addr, kwq_cid_addr;
4435 cnic_set_bnx2_mac(dev);
4437 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4438 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4439 if (BCM_PAGE_BITS > 12)
4440 val |= (12 - 8) << 4;
4442 val |= (BCM_PAGE_BITS - 8) << 4;
4444 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4446 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4447 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4448 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4450 err = cnic_setup_5709_context(dev, 1);
4454 cnic_init_context(dev, KWQ_CID);
4455 cnic_init_context(dev, KCQ_CID);
4457 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4458 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4460 cp->max_kwq_idx = MAX_KWQ_IDX;
4461 cp->kwq_prod_idx = 0;
4462 cp->kwq_con_idx = 0;
4463 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4465 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
4466 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4468 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4470 /* Initialize the kernel work queue context. */
4471 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4472 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4473 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4475 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4476 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4478 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4479 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4481 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4482 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4484 val = (u32) cp->kwq_info.pgtbl_map;
4485 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4487 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4488 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4490 cp->kcq1.sw_prod_idx = 0;
4491 cp->kcq1.hw_prod_idx_ptr =
4492 (u16 *) &sblk->status_completion_producer_index;
4494 cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
4496 /* Initialize the kernel complete queue context. */
4497 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4498 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4499 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4501 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4502 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4504 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4505 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4507 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4508 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4510 val = (u32) cp->kcq1.dma.pgtbl_map;
4511 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4514 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4515 struct status_block_msix *msblk = cp->status_blk.bnx2;
4516 u32 sb_id = cp->status_blk_num;
4517 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4519 cp->kcq1.hw_prod_idx_ptr =
4520 (u16 *) &msblk->status_completion_producer_index;
4521 cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
4522 cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
4523 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4524 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4525 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4528 /* Enable Command Scheduler notification when we write to the
4529 * host producer index of the kernel contexts. */
4530 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4532 /* Enable Command Scheduler notification when we write to either
4533 * the Send Queue or Receive Queue producer indexes of the kernel
4534 * bypass contexts. */
4535 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4536 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4538 /* Notify COM when the driver posts an application buffer. */
4539 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4541 /* Set the CP and COM doorbells. These two processors poll the
4542 * doorbell for a non-zero value before running. This must be done
4543 * after setting up the kernel queue contexts. */
4544 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4545 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4547 cnic_init_bnx2_tx_ring(dev);
4548 cnic_init_bnx2_rx_ring(dev);
4550 err = cnic_init_bnx2_irq(dev);
4552 netdev_err(dev->netdev, "cnic_init_irq failed\n");
4553 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4554 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4561 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4563 struct cnic_local *cp = dev->cnic_priv;
4564 struct cnic_eth_dev *ethdev = cp->ethdev;
4565 u32 start_offset = ethdev->ctx_tbl_offset;
4568 for (i = 0; i < cp->ctx_blks; i++) {
4569 struct cnic_ctx *ctx = &cp->ctx_arr[i];
4570 dma_addr_t map = ctx->mapping;
4572 if (cp->ctx_align) {
4573 unsigned long mask = cp->ctx_align - 1;
4575 map = (map + mask) & ~mask;
4578 cnic_ctx_tbl_wr(dev, start_offset + i, map);
4582 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4584 struct cnic_local *cp = dev->cnic_priv;
4585 struct cnic_eth_dev *ethdev = cp->ethdev;
4588 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4589 (unsigned long) dev);
4590 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4591 err = cnic_request_irq(dev);
4596 static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4597 u16 sb_id, u8 sb_index,
4601 u32 addr = BAR_CSTRORM_INTMEM +
4602 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4603 offsetof(struct hc_status_block_data_e1x, index_data) +
4604 sizeof(struct hc_index_data)*sb_index +
4605 offsetof(struct hc_index_data, flags);
4606 u16 flags = CNIC_RD16(dev, addr);
4608 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4609 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4610 HC_INDEX_DATA_HC_ENABLED);
4611 CNIC_WR16(dev, addr, flags);
4614 static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4616 struct cnic_local *cp = dev->cnic_priv;
4617 u8 sb_id = cp->status_blk_num;
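/* The timeout written below is presumably expressed in 12 us hardware
 * ticks (an assumption; the units are not defined in this file), so
 * 64 / 12 requests roughly a 64 us event-queue coalescing interval.
 */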
4619 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4620 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4621 offsetof(struct hc_status_block_data_e1x, index_data) +
4622 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4623 offsetof(struct hc_index_data, timeout), 64 / 12);
4624 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4627 static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4631 static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4632 struct client_init_ramrod_data *data)
4634 struct cnic_local *cp = dev->cnic_priv;
4635 struct cnic_uio_dev *udev = cp->udev;
4636 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4637 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4638 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4639 int port = CNIC_PORT(cp);
4640 int i;
4641 u32 cli = cp->ethdev->iscsi_l2_client_id;
4642 u32 val;
4644 memset(txbd, 0, BCM_PAGE_SIZE);
4646 buf_map = udev->l2_buf_map;
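/* Pre-build the TX BD ring in fixed groups of three BDs: a start BD
 * and, two slots later, a regular BD that points 0x10 bytes into the
 * same buffer (nbd = 3, nbytes = 0x10).
 */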
4647 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4648 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4649 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4651 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4652 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4653 reg_bd->addr_hi = start_bd->addr_hi;
4654 reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4655 start_bd->nbytes = cpu_to_le16(0x10);
4656 start_bd->nbd = cpu_to_le16(3);
4657 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4658 start_bd->general_data = (UNICAST_ADDRESS <<
4659 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
4660 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4664 val = (u64) ring_map >> 32;
4665 txbd->next_bd.addr_hi = cpu_to_le32(val);
4667 data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4669 val = (u64) ring_map & 0xffffffff;
4670 txbd->next_bd.addr_lo = cpu_to_le32(val);
4672 data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4674 /* Other ramrod params */
4675 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4676 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4678 /* reset xstorm per client statistics */
4679 if (cli < MAX_STAT_COUNTER_ID) {
4680 val = BAR_XSTRORM_INTMEM +
4681 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4682 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
4683 CNIC_WR(dev, val + i * 4, 0);
4686 cp->tx_cons_ptr =
4687 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
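/* The single l2_ring allocation is used page by page: page 0 holds
 * the TX BDs built above, page 1 the RX BDs, and page 2 the RX
 * completion queue, matching the offsets in cnic_init_bnx2x_rx_ring()
 * below.
 */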
4690 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4691 struct client_init_ramrod_data *data)
4693 struct cnic_local *cp = dev->cnic_priv;
4694 struct cnic_uio_dev *udev = cp->udev;
4695 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4696 BCM_PAGE_SIZE);
4697 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4698 (udev->l2_ring + (2 * BCM_PAGE_SIZE));
4699 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4700 int i;
4701 int port = CNIC_PORT(cp);
4702 u32 cli = cp->ethdev->iscsi_l2_client_id;
4703 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4704 u32 val;
4705 dma_addr_t ring_map = udev->l2_ring_map;
4708 data->general.client_id = cli;
4709 data->general.statistics_en_flg = 1;
4710 data->general.statistics_counter_id = cli;
4711 data->general.activate_flg = 1;
4712 data->general.sp_client_id = cli;
4714 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
4715 dma_addr_t buf_map;
4716 int n = (i % cp->l2_rx_ring_size) + 1;
4718 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4719 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4720 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
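/* Note the modulo above: only l2_rx_ring_size distinct buffers back
 * the entire BD ring, so receive buffers are deliberately reused.
 */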
4723 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4724 rxbd->addr_hi = cpu_to_le32(val);
4725 data->rx.bd_page_base.hi = cpu_to_le32(val);
4727 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4728 rxbd->addr_lo = cpu_to_le32(val);
4729 data->rx.bd_page_base.lo = cpu_to_le32(val);
4731 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
4732 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
4733 rxcqe->addr_hi = cpu_to_le32(val);
4734 data->rx.cqe_page_base.hi = cpu_to_le32(val);
4736 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4737 rxcqe->addr_lo = cpu_to_le32(val);
4738 data->rx.cqe_page_base.lo = cpu_to_le32(val);
4740 /* Other ramrod params */
4741 data->rx.client_qzone_id = cl_qzone_id;
4742 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
4743 data->rx.status_block_id = BNX2X_DEF_SB_ID;
4745 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
4746 data->rx.bd_buff_size = cpu_to_le16(cp->l2_single_buf_size);
4748 data->rx.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
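/* The 14 bytes subtracted above are the Ethernet header, i.e. each
 * receive buffer holds one MTU-sized payload plus its header.
 */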
4749 data->rx.outer_vlan_removal_enable_flg = 1;
4751 /* reset tstorm and ustorm per client statistics */
4752 if (cli < MAX_STAT_COUNTER_ID) {
4753 val = BAR_TSTRORM_INTMEM +
4754 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4755 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
4756 CNIC_WR(dev, val + i * 4, 0);
4758 val = BAR_USTRORM_INTMEM +
4759 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
4760 for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
4761 CNIC_WR(dev, val + i * 4, 0);
4764 cp->rx_cons_ptr =
4765 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
4766 cp->rx_cons = *cp->rx_cons_ptr;
4769 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
4771 struct cnic_local *cp = dev->cnic_priv;
4772 u32 pfid = cp->pfid;
4774 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4775 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
4776 cp->kcq1.sw_prod_idx = 0;
4778 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4779 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4781 cp->kcq1.hw_prod_idx_ptr =
4782 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4783 cp->kcq1.status_idx_ptr =
4784 &sb->sb.running_index[SM_RX_ID];
4785 } else {
4786 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
4788 cp->kcq1.hw_prod_idx_ptr =
4789 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4790 cp->kcq1.status_idx_ptr =
4791 &sb->sb.running_index[SM_RX_ID];
4794 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4795 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4797 cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
4798 USTORM_FCOE_EQ_PROD_OFFSET(pfid);
4799 cp->kcq2.sw_prod_idx = 0;
4800 cp->kcq2.hw_prod_idx_ptr =
4801 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
4802 cp->kcq2.status_idx_ptr =
4803 &sb->sb.running_index[SM_RX_ID];
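/* kcq2 is wired up only on E2 (57712) chips, where it carries FCoE
 * event completions alongside the iSCSI kcq1 programmed above.
 */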
4807 static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4809 struct cnic_local *cp = dev->cnic_priv;
4810 struct cnic_eth_dev *ethdev = cp->ethdev;
4811 int func = CNIC_FUNC(cp), ret, i;
4812 u32 pfid;
4814 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4815 u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
4817 if (!(val & 1))
4818 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
4819 else
4820 val = (val >> 1) & 1;
4822 if (val)
4823 cp->pfid = func >> 1;
4824 else
4825 cp->pfid = func & 0x6;
4826 } else {
4827 cp->pfid = func;
4829 pfid = cp->pfid;
4831 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
4832 cp->iscsi_start_cid, 0);
4837 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4838 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
4839 BNX2X_FCOE_NUM_CONNECTIONS,
4840 cp->fcoe_start_cid, 0);
4846 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
4848 cnic_init_bnx2x_kcq(dev);
4851 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
4852 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4853 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
4854 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4855 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
4856 cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
4857 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4858 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
4859 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
4860 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4861 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
4862 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
4863 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4864 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
4865 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
4866 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4867 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
4868 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
4869 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
4870 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4871 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
4872 HC_INDEX_ISCSI_EQ_CONS);
4874 for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
4875 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4876 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i),
4877 cp->conn_buf_info.pgtbl[2 * i]);
4878 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4879 TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(pfid, i) + 4,
4880 cp->conn_buf_info.pgtbl[(2 * i) + 1]);
4883 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4884 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
4885 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
4886 CNIC_WR(dev, BAR_USTRORM_INTMEM +
4887 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
4888 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
4890 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4891 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
4893 cnic_setup_bnx2x_context(dev);
4895 ret = cnic_init_bnx2x_irq(dev);
4902 static void cnic_init_rings(struct cnic_dev *dev)
4904 struct cnic_local *cp = dev->cnic_priv;
4905 struct cnic_uio_dev *udev = cp->udev;
4907 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
4908 return;
4910 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4911 cnic_init_bnx2_tx_ring(dev);
4912 cnic_init_bnx2_rx_ring(dev);
4913 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4914 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4915 u32 cli = cp->ethdev->iscsi_l2_client_id;
4916 u32 cid = cp->ethdev->iscsi_l2_cid;
4917 u32 cl_qzone_id;
4918 struct client_init_ramrod_data *data;
4919 union l5cm_specific_data l5_data;
4920 struct ustorm_eth_rx_producers rx_prods = {0};
4921 u32 off, i;
4923 rx_prods.bd_prod = 0;
4924 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
4927 cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4929 off = BAR_USTRORM_INTMEM +
4930 (BNX2X_CHIP_IS_E2(cp->chip_id) ?
4931 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
4932 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
4934 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
4935 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
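/* The initial BD and CQE producer values are published to ustorm
 * before the CLIENT_SETUP ramrod below activates the client.
 */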
4937 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4939 data = udev->l2_buf;
4941 memset(data, 0, sizeof(*data));
4943 cnic_init_bnx2x_tx_ring(dev, data);
4944 cnic_init_bnx2x_rx_ring(dev, data);
4946 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
4947 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
4949 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4951 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
4952 cid, ETH_CONNECTION_TYPE, &l5_data);
4954 i = 0;
4955 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4956 ++i < 10)
4957 msleep(1);
4959 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4960 netdev_err(dev->netdev,
4961 "iSCSI CLIENT_SETUP did not complete\n");
4962 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
4963 cnic_ring_ctl(dev, cid, cli, 1);
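/* The teardown path below mirrors this setup: HALT the client, wait
 * for the completion event, then release the connection with a
 * CFC_DEL ramrod.
 */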
4967 static void cnic_shutdown_rings(struct cnic_dev *dev)
4969 struct cnic_local *cp = dev->cnic_priv;
4971 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
4972 return;
4974 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4975 cnic_shutdown_bnx2_rx_ring(dev);
4976 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4977 struct cnic_local *cp = dev->cnic_priv;
4978 u32 cli = cp->ethdev->iscsi_l2_client_id;
4979 u32 cid = cp->ethdev->iscsi_l2_cid;
4980 union l5cm_specific_data l5_data;
4981 int i;
4983 cnic_ring_ctl(dev, cid, cli, 0);
4985 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4987 l5_data.phy_address.lo = cli;
4988 l5_data.phy_address.hi = 0;
4989 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
4990 cid, ETH_CONNECTION_TYPE, &l5_data);
4991 i = 0;
4992 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4993 ++i < 10)
4994 msleep(1);
4996 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4997 netdev_err(dev->netdev,
4998 "iSCSI CLIENT_HALT did not complete\n");
4999 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5001 memset(&l5_data, 0, sizeof(l5_data));
5002 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5003 cid, NONE_CONNECTION_TYPE, &l5_data);
5006 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5009 static int cnic_register_netdev(struct cnic_dev *dev)
5011 struct cnic_local *cp = dev->cnic_priv;
5012 struct cnic_eth_dev *ethdev = cp->ethdev;
5013 int err;
5015 if (!ethdev)
5016 return -ENODEV;
5018 if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5019 return 0;
5021 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5022 if (err)
5023 netdev_err(dev->netdev, "register_cnic failed\n");
5025 return err;
5028 static void cnic_unregister_netdev(struct cnic_dev *dev)
5030 struct cnic_local *cp = dev->cnic_priv;
5031 struct cnic_eth_dev *ethdev = cp->ethdev;
5033 if (!ethdev)
5034 return;
5036 ethdev->drv_unregister_cnic(dev->netdev);
5039 static int cnic_start_hw(struct cnic_dev *dev)
5041 struct cnic_local *cp = dev->cnic_priv;
5042 struct cnic_eth_dev *ethdev = cp->ethdev;
5043 int err;
5045 if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5046 return -EALREADY;
5048 dev->regview = ethdev->io_base;
5049 pci_dev_get(dev->pcidev);
5050 cp->func = PCI_FUNC(dev->pcidev->devfn);
5051 cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5052 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5054 err = cp->alloc_resc(dev);
5055 if (err) {
5056 netdev_err(dev->netdev, "allocate resource failure\n");
5057 goto err1;
5060 err = cp->start_hw(dev);
5061 if (err)
5062 goto err1;
5064 err = cnic_cm_open(dev);
5065 if (err)
5066 goto err1;
5068 set_bit(CNIC_F_CNIC_UP, &dev->flags);
5070 cp->enable_int(dev);
5072 return 0;
5074 err1:
5075 cp->free_resc(dev);
5076 pci_dev_put(dev->pcidev);
5080 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5082 cnic_disable_bnx2_int_sync(dev);
5084 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5085 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5087 cnic_init_context(dev, KWQ_CID);
5088 cnic_init_context(dev, KCQ_CID);
5090 cnic_setup_5709_context(dev, 0);
5093 cnic_free_resc(dev);
5097 static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5099 struct cnic_local *cp = dev->cnic_priv;
5101 cnic_free_irq(dev);
5102 *cp->kcq1.hw_prod_idx_ptr = 0;
5103 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5104 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
5105 CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5106 cnic_free_resc(dev);
5109 static void cnic_stop_hw(struct cnic_dev *dev)
5111 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5112 struct cnic_local *cp = dev->cnic_priv;
5113 int i = 0;
5115 /* Need to wait for the ring shutdown event to complete
5116 * before clearing the CNIC_UP flag.
5118 while (cp->udev->uio_dev != -1 && i < 15) {
5119 msleep(100);
5120 i++;
5122 cnic_shutdown_rings(dev);
5123 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5124 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
5126 cnic_cm_shutdown(dev);
5128 pci_dev_put(dev->pcidev);
5132 static void cnic_free_dev(struct cnic_dev *dev)
5134 int i = 0;
5136 while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5137 msleep(100);
5138 i++;
5140 if (atomic_read(&dev->ref_count) != 0)
5141 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5143 netdev_info(dev->netdev, "Removed CNIC device\n");
5144 dev_put(dev->netdev);
5145 kfree(dev);
5148 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5149 struct pci_dev *pdev)
5151 struct cnic_dev *cdev;
5152 struct cnic_local *cp;
5153 int alloc_size;
5155 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5157 cdev = kzalloc(alloc_size, GFP_KERNEL);
5158 if (cdev == NULL) {
5159 netdev_err(dev, "allocate dev struct failure\n");
5160 return cdev;
5163 cdev->netdev = dev;
5164 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5165 cdev->register_device = cnic_register_device;
5166 cdev->unregister_device = cnic_unregister_device;
5167 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5169 cp = cdev->cnic_priv;
5171 cp->l2_single_buf_size = 0x400;
5172 cp->l2_rx_ring_size = 3;
5174 spin_lock_init(&cp->cnic_ulp_lock);
5176 netdev_info(dev, "Added CNIC device\n");
5178 return cdev;
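/* symbol_get() below pins the bnx2/bnx2x module only for the duration
 * of the probe call; symbol_put() drops that reference right after.
 */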
5181 static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5183 struct pci_dev *pdev;
5184 struct cnic_dev *cdev;
5185 struct cnic_local *cp;
5186 struct cnic_eth_dev *ethdev = NULL;
5187 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5189 probe = symbol_get(bnx2_cnic_probe);
5190 if (probe) {
5191 ethdev = (*probe)(dev);
5192 symbol_put(bnx2_cnic_probe);
5194 if (!ethdev)
5195 return NULL;
5197 pdev = ethdev->pdev;
5198 if (!pdev)
5199 return NULL;
5201 dev_hold(dev);
5202 pci_dev_get(pdev);
5203 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5204 pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5205 (pdev->revision < 0x10)) {
5206 pci_dev_put(pdev);
5207 goto cnic_err;
5209 pci_dev_put(pdev);
5211 cdev = cnic_alloc_dev(dev, pdev);
5212 if (cdev == NULL)
5213 goto cnic_err;
5215 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5216 cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5218 cp = cdev->cnic_priv;
5219 cp->ethdev = ethdev;
5220 cdev->pcidev = pdev;
5221 cp->chip_id = ethdev->chip_id;
5223 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5225 cp->cnic_ops = &cnic_bnx2_ops;
5226 cp->start_hw = cnic_start_bnx2_hw;
5227 cp->stop_hw = cnic_stop_bnx2_hw;
5228 cp->setup_pgtbl = cnic_setup_page_tbl;
5229 cp->alloc_resc = cnic_alloc_bnx2_resc;
5230 cp->free_resc = cnic_free_resc;
5231 cp->start_cm = cnic_cm_init_bnx2_hw;
5232 cp->stop_cm = cnic_cm_stop_bnx2_hw;
5233 cp->enable_int = cnic_enable_bnx2_int;
5234 cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5235 cp->close_conn = cnic_close_bnx2_conn;
5237 return cdev;
5239 cnic_err:
5240 dev_put(dev);
5241 return NULL;
5243 static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5245 struct pci_dev *pdev;
5246 struct cnic_dev *cdev;
5247 struct cnic_local *cp;
5248 struct cnic_eth_dev *ethdev = NULL;
5249 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5251 probe = symbol_get(bnx2x_cnic_probe);
5252 if (probe) {
5253 ethdev = (*probe)(dev);
5254 symbol_put(bnx2x_cnic_probe);
5256 if (!ethdev)
5257 return NULL;
5259 pdev = ethdev->pdev;
5264 cdev = cnic_alloc_dev(dev, pdev);
5270 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5271 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5273 cp = cdev->cnic_priv;
5274 cp->ethdev = ethdev;
5275 cdev->pcidev = pdev;
5276 cp->chip_id = ethdev->chip_id;
5278 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5279 cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5280 if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
5281 !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
5282 cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5284 memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
5286 cp->cnic_ops = &cnic_bnx2x_ops;
5287 cp->start_hw = cnic_start_bnx2x_hw;
5288 cp->stop_hw = cnic_stop_bnx2x_hw;
5289 cp->setup_pgtbl = cnic_setup_page_tbl_le;
5290 cp->alloc_resc = cnic_alloc_bnx2x_resc;
5291 cp->free_resc = cnic_free_resc;
5292 cp->start_cm = cnic_cm_init_bnx2x_hw;
5293 cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5294 cp->enable_int = cnic_enable_bnx2x_int;
5295 cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5296 if (BNX2X_CHIP_IS_E2(cp->chip_id))
5297 cp->ack_int = cnic_ack_bnx2x_e2_msix;
5299 cp->ack_int = cnic_ack_bnx2x_msix;
5300 cp->close_conn = cnic_close_bnx2x_conn;
5302 return cdev;
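/* Candidate netdevs are matched by the driver name reported through
 * ethtool rather than by PCI IDs; see is_cnic_dev() below.
 */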
5304 static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5306 struct ethtool_drvinfo drvinfo;
5307 struct cnic_dev *cdev = NULL;
5309 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5310 memset(&drvinfo, 0, sizeof(drvinfo));
5311 dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5313 if (!strcmp(drvinfo.driver, "bnx2"))
5314 cdev = init_bnx2_cnic(dev);
5315 if (!strcmp(drvinfo.driver, "bnx2x"))
5316 cdev = init_bnx2x_cnic(dev);
5317 if (cdev) {
5318 write_lock(&cnic_dev_lock);
5319 list_add(&cdev->list, &cnic_dev_list);
5320 write_unlock(&cnic_dev_lock);
5327 * netdev event handler
5329 static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5332 struct net_device *netdev = ptr;
5333 struct cnic_dev *dev;
5334 int if_type;
5335 int new_dev = 0;
5337 dev = cnic_from_netdev(netdev);
5339 if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
5340 /* Check for the hot-plug device */
5341 dev = is_cnic_dev(netdev);
5348 struct cnic_local *cp = dev->cnic_priv;
5350 if (new_dev)
5351 cnic_ulp_init(dev);
5352 else if (event == NETDEV_UNREGISTER)
5353 cnic_ulp_exit(dev);
5355 if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
5356 if (cnic_register_netdev(dev) != 0) {
5357 cnic_put(dev);
5358 goto done;
5360 if (!cnic_start_hw(dev))
5361 cnic_ulp_start(dev);
5364 rcu_read_lock();
5365 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5366 struct cnic_ulp_ops *ulp_ops;
5367 void *ctx;
5369 ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
5370 if (!ulp_ops || !ulp_ops->indicate_netevent)
5371 continue;
5373 ctx = cp->ulp_handle[if_type];
5375 ulp_ops->indicate_netevent(ctx, event);
5377 rcu_read_unlock();
5379 if (event == NETDEV_GOING_DOWN) {
5380 cnic_ulp_stop(dev);
5381 cnic_stop_hw(dev);
5382 cnic_unregister_netdev(dev);
5383 } else if (event == NETDEV_UNREGISTER) {
5384 write_lock(&cnic_dev_lock);
5385 list_del_init(&dev->list);
5386 write_unlock(&cnic_dev_lock);
5388 cnic_put(dev);
5389 cnic_free_dev(dev);
5390 goto done;
5392 cnic_put(dev);
5394 done:
5395 return NOTIFY_DONE;
5398 static struct notifier_block cnic_netdev_notifier = {
5399 .notifier_call = cnic_netdev_event
5402 static void cnic_release(void)
5404 struct cnic_dev *dev;
5405 struct cnic_uio_dev *udev;
5407 while (!list_empty(&cnic_dev_list)) {
5408 dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
5409 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5410 cnic_ulp_stop(dev);
5411 cnic_stop_hw(dev);
5415 cnic_unregister_netdev(dev);
5416 list_del_init(&dev->list);
5417 cnic_free_dev(dev);
5419 while (!list_empty(&cnic_udev_list)) {
5420 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5421 list);
5422 cnic_free_uio(udev);
5426 static int __init cnic_init(void)
5430 pr_info("%s", version);
5432 rc = register_netdevice_notifier(&cnic_netdev_notifier);
5434 if (rc)
5435 return rc;
5438 cnic_wq = create_singlethread_workqueue("cnic_wq");
5440 if (!cnic_wq) {
5441 unregister_netdevice_notifier(&cnic_netdev_notifier);
5442 return -ENOMEM;
5445 return 0;
5448 static void __exit cnic_exit(void)
5450 unregister_netdevice_notifier(&cnic_netdev_notifier);
5451 cnic_release();
5452 destroy_workqueue(cnic_wq);
5455 module_init(cnic_init);
5456 module_exit(cnic_exit);