/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/route.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic_defs.h"

#define CNIC_MODULE_NAME	"cnic"

static char version[] =
	"Broadcom NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
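
/* UIO file operations.  Opening the cnic UIO device (e.g. from the iSCSI
 * user-space helper) requires CAP_NET_ADMIN, allows only one opener at a
 * time, and resets the L2 rings to a clean state for the new user. */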

static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
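
/* The helpers below do not touch the hardware directly; they tunnel
 * context writes, L2 ring commands and indirect register accesses to the
 * underlying bnx2/bnx2x ethernet driver through its drv_ctl() callback. */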

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}
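
/* Translate a hardware CID back to its index in the context table; the
 * table is small enough that a linear scan is sufficient. */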

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}

static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
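
/* ULP (iSCSI/FCoE/L4) registration.  cnic_register_driver() publishes the
 * ops table under RCU and then calls cnic_init() on every known device
 * under the RTNL lock to avoid racing with netdev events. */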

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	else if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	cnic_ulp_ctl(dev, ulp_type, false);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return -1;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
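
/* DMA areas are described by a struct cnic_dma: an array of coherent
 * pages plus an optional page table that the chip walks to find them. */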

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}
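
/* Page table entries are 64-bit DMA addresses stored as two 32-bit words;
 * one builder emits the high word first (big endian order for the chip),
 * the other the low word first. */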

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BNX2_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
			  ~(BNX2_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}
}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
		if (udev->uio_dev == -1)
			__cnic_free_uio_rings(udev);
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BNX2_PAGE_SIZE;
		cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BNX2_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
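
/* KCQ index helpers.  On bnx2x the last entry of each queue page is a
 * next-page pointer (struct bnx2x_bd_chain_next), so the software index
 * must skip over it; bnx2 queues have no such gap. */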

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_bnx2x_next_idx)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_bnx2x_next_idx)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_bnx2x_next_idx) {
		info->next_idx = cnic_bnx2x_next_idx;
		info->hw_idx = cnic_bnx2x_hw_idx;
	}

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}
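
/* L2 ring and receive-buffer memory shared with user space via UIO.
 * A cnic_uio_dev is shared per underlying PCI device, so ring allocation
 * is a no-op when the rings already exist. */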

static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;
}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			if (__cnic_alloc_uio_rings(udev, pages)) {
				udev->dev = NULL;
				read_unlock(&cnic_dev_lock);
				return -ENOMEM;
			}
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;

	if (__cnic_alloc_uio_rings(udev, pages))
		goto err_udev;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

err_udev:
	kfree(udev);
	return -ENOMEM;
}
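
/* Export the device BAR, status block, L2 ring and L2 buffer to user
 * space as UIO memory regions 0-3. */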

static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (CNIC_SUPPORTS_FCOE(cp)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return 0;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
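
/* Copy the caller's work queue elements into the kernel work queue ring
 * and ring the doorbell with the new producer index. */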

static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}
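
/* Build a single slow-path element (SPE) around the caller's 16-byte
 * KWQE data and submit it through the bnx2x driver. */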

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return ret;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}
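
/* ISCSI_KWQE_OPCODE_INIT1: size the task array, R2T queue and HQ from the
 * request and program per-function iSCSI parameters into the T/U/X/C
 * storm internal RAM. */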

static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
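
/* Fill in the iSCSI connection context for the chip: per-storm queue page
 * tables, current PBEs and TCP parameters, taken from the three (or more)
 * connection-offload KWQEs. */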

static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = CNIC_PORT(cp);

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
		ETH_P_8021Q;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
	    cp->port_mode == CHIP_2_PORT_MODE) {

		port = 0;
	}
	ictx->xstorm_st_context.common.flags =
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
	ictx->xstorm_st_context.common.flags =
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}

static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return ret;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
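
/* Prepare the Xstorm/Tstorm connection buffers for a TCP connect ramrod,
 * including the precomputed IPv6-style pseudo-header checksum. */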

static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->max_rt_time = 0xffffffff;
}
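
/* Program the local MAC address into the Xstorm and Tstorm RAM; the
 * Tstorm copy is kept in byte-reversed order. */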

static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[0]);
}

static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}

static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
		kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}

static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
		req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
		req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
2230 static int cnic_bnx2x_update_pg(struct cnic_dev
*dev
, struct kwqe
*kwqe
)
2232 struct l4_kwq_update_pg
*req
= (struct l4_kwq_update_pg
*) kwqe
;
2234 struct kcqe
*cqes
[1];
2236 memset(&kcqe
, 0, sizeof(kcqe
));
2237 kcqe
.pg_host_opaque
= req
->pg_host_opaque
;
2238 kcqe
.pg_cid
= req
->pg_cid
;
2239 kcqe
.op_code
= L4_KCQE_OPCODE_VALUE_UPDATE_PG
;
2240 cqes
[0] = (struct kcqe
*) &kcqe
;
2241 cnic_reply_bnx2x_kcqes(dev
, CNIC_ULP_L4
, cqes
, 1);
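/* On bnx2x devices the PG entries (the L2 path of a connection: MACs and
 * VLAN, see cnic_cm_offload_pg() below) have no firmware state of their
 * own, so the two handlers above never touch the chip; they simply echo
 * a successful OFFLOAD_PG/UPDATE_PG KCQE back to the L4 ULP so the
 * connection state machine can proceed.
 */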
static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	*work = 3;
	return ret;
}
static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret = 0;
	u32 cid = -1, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kwqe_conn_offload1 *req1;
	struct fcoe_kwqe_conn_offload2 *req2;
	struct fcoe_kwqe_conn_offload3 *req3;
	struct fcoe_kwqe_conn_offload4 *req4;
	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
	struct cnic_context *ctx;
	struct fcoe_context *fctx;
	struct regpair ctx_addr;
	union l5cm_specific_data l5_data;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	if (num < 4) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];

	*work = 4;

	l5_cid = req1->fcoe_conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		goto err_reply;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto err_reply;

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		ret = 0;
		goto err_reply;
	}
	cid = ctx->cid;

	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
	if (fctx) {
		u32 hw_cid = BNX2X_HW_CID(cp, cid);
		u32 val;

		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->xstorm_ag_context.cdu_reserved = val;
		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->ustorm_ag_context.cdu_usage = val;
	}
	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_offload size too big\n");
		goto err_reply;
	}
	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_offload)
		goto err_reply;

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));

	cid = BNX2X_HW_CID(cp, cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;

err_reply:
	if (cid != -1)
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}
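/* If anything goes wrong before the OFFLOAD_CONN ramrod can be posted,
 * err_reply releases any per-connection resources already allocated and
 * hands the FCoE ULP a synthetic OFFLOAD_CONN KCQE with CTX_ALLOC_FAILURE
 * status, so the upper driver fails fast instead of timing out.
 */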
static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_enable size too big\n");
		return -ENOMEM;
	}
	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_enable)
		return -ENOMEM;

	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_disable size too big\n");
		return -ENOMEM;
	}
	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_disable)
		return -ENOMEM;

	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_destroy *req;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (ctx->wait_cond)
			kcqe.completion_status = 0;
	}

	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));

	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
	kcqe.fcoe_conn_id = req->conn_id;
	kcqe.fcoe_conn_context_id = cid;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}
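/* TERMINATE_CONN completes asynchronously: cnic_process_fcoe_term_conn()
 * sets ctx->wait_cond and wakes ctx->waitq when the ramrod finishes.
 * Whatever the outcome, the context is flagged CTX_FL_DELETE_WAIT and the
 * delete_task worker performs the final CFC delete roughly two seconds
 * later (see cnic_delete_task() below).
 */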
static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 i;

	for (i = start_cid; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];
		int j;

		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			msleep(10);

		for (j = 0; j < 5; j++) {
			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
				break;
			msleep(20);
		}

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			netdev_warn(dev->netdev, "CID %x not deleted\n",
				    ctx->cid);
	}
}

static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_destroy *req;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);

	req = (struct fcoe_kwqe_destroy *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}
static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kcqe kcqe;
	struct kcqe *cqes[1];
	u32 cid;
	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
	u32 kcqe_op;
	int ulp_type;

	cid = kwqe->kwqe_info0;
	memset(&kcqe, 0, sizeof(kcqe));

	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
		u32 l5_cid = 0;

		ulp_type = CNIC_ULP_FCOE;
		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
			struct fcoe_kwqe_conn_enable_disable *req;

			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
			cid = req->context_id;
			l5_cid = req->conn_id;
		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
		} else {
			return;
		}
		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
		kcqe.kcqe_info2 = cid;
		kcqe.kcqe_info0 = l5_cid;

	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
		ulp_type = CNIC_ULP_ISCSI;
		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
			cid = kwqe->kwqe_info1;

		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
		kcqe.kcqe_info2 = cid;
		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);

	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;

		ulp_type = CNIC_ULP_L4;
		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		else
			return;

		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
				    KCQE_FLAGS_LAYER_MASK_L4;
		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
	} else {
		return;
	}

	cqes[0] = &kcqe;
	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
}
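/* cnic_bnx2x_kwqe_err() is only invoked from the submission paths below
 * when a KWQE fails with -EIO or -EAGAIN (typically a parity error while
 * the chip is being reset).  It fabricates the completion the firmware
 * would normally have produced, with a PARITY_ERROR status, so the ULP
 * can tear the request down immediately instead of waiting for a timeout.
 */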
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
					 struct kwqe *wqes[], u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0) {
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			/* Possibly bnx2x parity error, send completion
			 * to ulp drivers with error code to speed up
			 * cleanup and reset recovery.
			 */
			if (ret == -EIO || ret == -EAGAIN)
				cnic_bnx2x_kwqe_err(dev, kwqe);
		}
		i += work;
	}
	return 0;
}
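/* Each handler reports through &work how many KWQEs it consumed (1 for
 * simple requests, more for chained ones such as OFFLOAD_CONN1 and
 * CONNECT1), and the loop index advances by that amount.  Unknown
 * opcodes are logged and skipped rather than failing the whole batch.
 */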
static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
					struct kwqe *wqes[], u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
		return -EINVAL;

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case FCOE_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_ENABLE_CONN:
			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DISABLE_CONN:
			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY:
			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_STAT:
			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0) {
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			/* Possibly bnx2x parity error, send completion
			 * to ulp drivers with error code to speed up
			 * cleanup and reset recovery.
			 */
			if (ret == -EIO || ret == -EAGAIN)
				cnic_bnx2x_kwqe_err(dev, kwqe);
		}
		i += work;
	}
	return 0;
}
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int ret = -EINVAL;
	u32 layer_code;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!num_wqes)
		return 0;

	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
	switch (layer_code) {
	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
	case KWQE_FLAGS_LAYER_MASK_L4:
	case KWQE_FLAGS_LAYER_MASK_L2:
		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
		break;

	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
		break;
	}
	return ret;
}

static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
{
	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
		return KCQE_FLAGS_LAYER_MASK_L4;

	return opflag & KCQE_FLAGS_LAYER_MASK;
}
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
			ulp_type = CNIC_ULP_FCOE;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}
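/* KCQEs are delivered to the ULPs in batches: the inner loop extends the
 * run [i, i + j) for as long as consecutive entries map to the same layer
 * (cnic_get_kcqe_layer_mask() deliberately folds the FCoE TERMINATE_CONN
 * completion into the L4 layer, since the CM code handles it).  Ramrod
 * completions found in the run are returned to the SPQ credit pool
 * afterwards via cnic_spq_completion().
 */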
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = info->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = info->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}
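/* A multi-part completion is written as a sequence of KCQEs with
 * KCQE_FLAGS_NEXT set on every entry but the last.  last/last_cnt only
 * advance on entries without the flag, so a partially written sequence
 * stays in the queue until the firmware finishes its final entry.
 */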
static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp++;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}

static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons, tx_cons;
	int comp = 0;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	rx_cons = *cp->rx_cons_ptr;
	tx_cons = *cp->tx_cons_ptr;
	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		if (cp->udev)
			uio_event_notify(&cp->udev->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading other fields */
	rmb();
	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();
		status_idx = (u16) *cp->kcq1.status_idx_ptr;
		/* status block index must be read first */
		rmb();
		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}

static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		struct status_block *sblk = status_blk;

		return sblk->status_idx;
	}

	return cnic_service_bnx2_queues(dev);
}
static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_doirq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;

		prefetch(cp->status_blk.gen);
		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

		tasklet_schedule(&cp->cnic_irq_task);
	}
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
		((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
		 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
		 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
		 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
			    u16 index, u8 op, u8 update)
{
	struct igu_regular cmd_data;
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;

	cmd_data.sb_id_and_flags =
		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		(update << IGU_REGULAR_BUPDATE_SHIFT) |
		(op << IGU_REGULAR_ENABLE_INT_SHIFT);

	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}

static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
			IGU_INT_DISABLE, 0);
}

static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx,
			   IGU_INT_ENABLE, 1);
}

static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx,
			IGU_INT_ENABLE, 1);
}
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	u32 last_status = *info->status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading the KCQ */
	rmb();
	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();

		last_status = *info->status_idx_ptr;
		/* status block index must be read before reading the KCQ */
		rmb();
	}
	return last_status;
}

static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx, new_status_idx;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	while (1) {
		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

		CNIC_WR16(dev, cp->kcq1.io_addr,
			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);

		if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
			cp->arm_int(dev, status_idx);
			break;
		}

		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);

		if (new_status_idx != status_idx)
			continue;

		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
			  MAX_KCQ_IDX);

		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
				status_idx, IGU_INT_ENABLE, 1);

		break;
	}
}

static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;

	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		cnic_doirq(dev);

	cnic_chk_pkt_rings(cp);

	return 0;
}
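/* kcq1 carries iSCSI/L4 events and kcq2 carries FCoE events on chips
 * that support it.  Both queues share one status block index, so the
 * bottom half keeps draining until a pass over kcq2 observes the same
 * index it saw before servicing kcq1; only then is it safe to re-arm
 * the IGU.
 */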
static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
{
	struct cnic_ulp_ops *ulp_ops;

	if (if_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
					    lockdep_is_held(&cnic_lock));
	if (!ulp_ops) {
		mutex_unlock(&cnic_lock);
		return;
	}
	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	mutex_unlock(&cnic_lock);

	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}

static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
		cnic_ulp_stop_one(cp, if_type);
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						    lockdep_is_held(&cnic_lock));
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;
	int rc;

	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (ulp_ops && ulp_ops->cnic_get_stats)
		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
	else
		rc = -ENODEV;
	mutex_unlock(&cnic_lock);
	return rc;
}
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;
	int ulp_type = CNIC_ULP_ISCSI;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_STOP_ISCSI_CMD: {
		struct cnic_local *cp = dev->cnic_priv;
		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
		break;
	}
	case CNIC_CTL_COMPLETION_CMD: {
		struct cnic_ctl_completion *comp = &info->data.comp;
		u32 cid = BNX2X_SW_CID(comp->cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
			break;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			if (unlikely(comp->error)) {
				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
				netdev_err(dev->netdev,
					   "CID %x CFC delete comp error %x\n",
					   cid, comp->error);
			}

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	case CNIC_CTL_FCOE_STATS_GET_CMD:
		ulp_type = CNIC_ULP_FCOE;
		/* fall through */
	case CNIC_CTL_ISCSI_STATS_GET_CMD:
		cnic_copy_ulp_stats(dev, ulp_type);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
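/* cnic_ctl() is the control entry point invoked from the ethernet driver
 * side (wired up through struct cnic_ops).  STOP/START bracket a chip
 * reset, STOP_ISCSI defers the iSCSI shutdown to the delete_task worker,
 * and COMPLETION_CMD reports a finished CFC delete ramrod back to
 * whoever is sleeping on ctx->waitq.
 */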
static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_start = DEF_IPID_START;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}
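/* The CONNECT chain built above mirrors what cnic_bnx2x_connect() expects
 * on the receive side: req1 (addresses, ports, flags) is always first,
 * req2 is inserted only for IPv6 to carry the upper three address words,
 * and req3 (TCP options, MSS derived from the path MTU) is always last.
 */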
static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	if (cp->ctx_tbl) {
		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			return -EAGAIN;
	}

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;

	*csk = csk1;
	return 0;
}

static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}
static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct rtable *rt;

	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
	if (!IS_ERR(rt)) {
		*dst = &rt->dst;
		return 0;
	}
	return PTR_ERR(rt);
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_addr->sin6_addr;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl6);
	if ((*dst)->error) {
		dst_release(*dst);
		*dst = NULL;
		return -ENETUNREACH;
	} else
		return 0;
#endif

	return -ENETUNREACH;
}

static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}
static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}

static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, rc = 0;
	struct dst_entry *dst = NULL;
	struct net_device *realdev;
	__be16 local_port;
	u32 port_id;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
		set_bit(SK_F_IPV6, &csk->flags);
		cnic_get_v6_route(&saddr->remote.v6, &dst);

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;

	} else {
		cnic_get_v4_route(&saddr->remote.v4, &dst);

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = 0;
	csk->mtu = dev->netdev->mtu;
	if (dst && dst->dev) {
		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
		if (realdev == dev->netdev) {
			csk->vlan_id = vlan;
			csk->mtu = dst_mtu(dst);
		}
	}

	port_id = be16_to_cpu(local_port);
	if (port_id >= CNIC_LOCAL_PORT_MIN &&
	    port_id < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
			port_id = 0;
	} else
		port_id = 0;

	if (!port_id) {
		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (port_id == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
		local_port = cpu_to_be16(port_id);
	}
	csk->src_port = local_port;

err_out:
	dst_release(dst);
	return rc;
}
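/* Local port selection: if the ULP supplied a port inside
 * [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX) we try to reserve exactly
 * that one; otherwise (or if it is already taken) a free identifier is
 * pulled from csk_port_tbl, which cnic_cm_alloc_mem() seeded at a
 * random starting offset.
 */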
static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}

static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	int err = 0;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return -EOPNOTSUPP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}

static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful, or it has been reset by the target.
	 */

	cp->close_conn(csk, opcode);
	if (csk->state != opcode) {
		/* Wait for remote reset sequence to complete */
		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			msleep(1);

		return -EALREADY;
	}

	return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	} else {
		/* Wait for remote reset sequence to complete */
		while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			msleep(1);

		return -EALREADY;
	}
	return 0;
}
static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
			   u8 opcode)
{
	struct cnic_ulp_ops *ulp_ops;
	int ulp_type = csk->ulp_type;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (ulp_ops) {
		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
			ulp_ops->cm_connect_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			ulp_ops->cm_close_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
			ulp_ops->cm_remote_abort(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
			ulp_ops->cm_abort_complete(csk);
		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
			ulp_ops->cm_remote_close(csk);
	}
	rcu_read_unlock();
}

static int cnic_cm_set_pg(struct cnic_sock *csk)
{
	if (cnic_offld_prep(csk)) {
		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
			cnic_cm_update_pg(csk);
		else
			cnic_cm_offload_pg(csk);
	}
	return 0;
}
*dev
, struct l4_kcq
*kcqe
)
3930 struct cnic_local
*cp
= dev
->cnic_priv
;
3931 u32 l5_cid
= kcqe
->pg_host_opaque
;
3932 u8 opcode
= kcqe
->op_code
;
3933 struct cnic_sock
*csk
= &cp
->csk_tbl
[l5_cid
];
3936 if (!cnic_in_use(csk
))
3939 if (opcode
== L4_KCQE_OPCODE_VALUE_UPDATE_PG
) {
3940 clear_bit(SK_F_OFFLD_SCHED
, &csk
->flags
);
3943 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3944 if (kcqe
->status
== L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL
) {
3945 clear_bit(SK_F_OFFLD_SCHED
, &csk
->flags
);
3946 cnic_cm_upcall(cp
, csk
,
3947 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE
);
3951 csk
->pg_cid
= kcqe
->pg_cid
;
3952 set_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
);
3953 cnic_cm_conn_req(csk
);
3959 static void cnic_process_fcoe_term_conn(struct cnic_dev
*dev
, struct kcqe
*kcqe
)
3961 struct cnic_local
*cp
= dev
->cnic_priv
;
3962 struct fcoe_kcqe
*fc_kcqe
= (struct fcoe_kcqe
*) kcqe
;
3963 u32 l5_cid
= fc_kcqe
->fcoe_conn_id
+ BNX2X_FCOE_L5_CID_BASE
;
3964 struct cnic_context
*ctx
= &cp
->ctx_tbl
[l5_cid
];
3966 ctx
->timestamp
= jiffies
;
3968 wake_up(&ctx
->waitq
);
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
		cnic_process_fcoe_term_conn(dev, kcqe);
		return;
	}
	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
		if (l4kcqe->status != 0) {
			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
			cnic_cm_upcall(cp, csk,
				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		}
		break;
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
		else if (l4kcqe->status ==
			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
			set_bit(SK_F_HW_ERR, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L5CM_RAMROD_CMD_ID_CLOSE:
		if (l4kcqe->status != 0) {
			netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
				    "status 0x%x\n", l4kcqe->status);
			opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
			/* Fall through */
		} else {
			break;
		}
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
			set_bit(SK_F_HW_ERR, &csk->flags);

		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		/* after we already sent CLOSE_REQ */
		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
		else
			cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}

static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes		= cnic_cm_indicate_kcqe,
};
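/* The L4 connection manager consumes its own KCQEs through the same ULP
 * mechanism that bnx2i and bnx2fc use: cnic_cm_open() registers
 * cm_ulp_ops under CNIC_ULP_L4, so service_kcqes() routes L4-layer
 * completions straight back into cnic_cm_indicate_kcqe() above.
 */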
static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 port_id;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	port_id = prandom_u32();
	port_id %= CNIC_LOCAL_PORT_RANGE;
	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN, port_id)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}

static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		/* Unsolicited RESET_COMP or RESET_RECEIVED */
		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
		csk->state = opcode;
	}

	/* 1. If event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
	 *    event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
	 */
	if (opcode == csk->state || csk->state == 0 ||
	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
			if (csk->state == 0)
				csk->state = opcode;
			return 1;
		}
	}
	return 0;
}
static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
		cnic_cm_upcall(cp, csk, opcode);
		return;
	}

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	cnic_close_conn(csk);
	csk->state = opcode;
	cnic_cm_upcall(cp, csk, opcode);
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	seed = prandom_u32();
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}
static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode)) {
			if (test_bit(SK_F_HW_ERR, &csk->flags))
				close_complete = 1;
			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
			else
				close_complete = 1;
		}
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}

static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!cp->ctx_tbl)
		return;

	if (!netif_running(dev->netdev))
		return;

	cnic_bnx2x_delete_wait(dev, 0);

	cancel_delayed_work(&cp->delete_task);
	flush_workqueue(cnic_wq);

	if (atomic_read(&cp->iscsi_conn) != 0)
		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
			    atomic_read(&cp->iscsi_conn));
}
static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = cp->pfid;
	u32 port = CNIC_PORT(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
		DEF_MAX_CWND);
	return 0;
}
static void cnic_delete_task(struct work_struct *work)
{
	struct cnic_local *cp;
	struct cnic_dev *dev;
	u32 i;
	int need_resched = 0;

	cp = container_of(work, struct cnic_local, delete_task.work);
	dev = cp->dev;

	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
		struct drv_ctl_info info;

		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);

		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
		cp->ethdev->drv_ctl(dev->netdev, &info);
	}

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];
		int err;

		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
			need_resched = 1;
			continue;
		}

		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		err = cnic_bnx2x_destroy_ramrod(dev, i);

		cnic_free_bnx2x_conn_resc(dev, i);
		if (!err) {
			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
				atomic_dec(&cp->iscsi_conn);

			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
		}
	}

	if (need_resched)
		queue_delayed_work(cnic_wq, &cp->delete_task,
				   msecs_to_jiffies(10));

}
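/* Contexts are destroyed lazily: a CID must have been quiescent for two
 * seconds (ctx->timestamp) before its CFC delete ramrod is issued.  Any
 * context that is not ready yet simply re-queues the worker another
 * 10ms out, so the scan converges without blocking the workqueue.
 */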
static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}

static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}
static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	u32 cid_addr;
	int i;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}

static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
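/* The 5709 keeps its context memory in host pages: each iteration points
 * the chip's page table at one host page and then polls WRITE_REQ (up to
 * ten 5us reads) until the hardware acknowledges the entry.  A stuck
 * WRITE_REQ bit aborts the setup with -EBUSY.
 */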
static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_kill(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}

static int cnic_request_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
	if (err)
		tasklet_disable(&cp->cnic_irq_task);

	return err;
}

static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int err, i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = cnic_request_irq(dev);
		if (err)
			return err;

		while (cp->status_blk.bnx2->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->status_blk.bnx2->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk.gen;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
	return -EBUSY;
}
static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct bnx2_tx_bd *txbd;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct status_block *s_blk = cp->status_blk.gen;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		tx_cid = TX_TSS_CID + sb_id - 1;
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		cnic_init_context(dev, tx_cid);
		cnic_init_context(dev, tx_cid + 1);

		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = udev->l2_ring;

	buf_map = udev->l2_buf_map;
	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}

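/* Build the L2 RX buffer descriptor ring, located one page above the TX
 * ring in the UIO buffer, program the RX connection context on CID 2,
 * and set bit 2 of the RXP flood scratchpad register.
 */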
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct bnx2_rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk.gen;
	dma_addr_t ring_map = udev->l2_ring_map;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (!(*cp->rx_cons_ptr != 0) && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
	for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}

static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}

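/* Read the iSCSI MAC address from shared memory into dev->mac_addr and
 * program the EMAC MAC_MATCH4/5 registers and the RPM_SORT_USER2 sort
 * rule with it.
 */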
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}

static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BNX2_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (BNX2_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		&sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	cp->int_num = 0;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			&msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = &msblk->status_idx;
		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;

	return 0;
}

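/* Write the (optionally alignment-adjusted) DMA addresses of the
 * connection context blocks into the chip context table, starting at
 * the offset supplied by the bnx2x driver.
 */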
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}

static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);

	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
			offsetof(struct hc_index_data, timeout), 64 / 4);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

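/* Pre-build the iSCSI L2 TX BD chain in the UIO ring buffer and fill in
 * the TX section of the client-init ramrod data.
 */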
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, BNX2_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_parse_bd_e1x *pbd_e1x =
			&((txbd + 1)->parse_bd_e1x);
		struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2);
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

		if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
			pbd_e2->parsing_data = (UNICAST_ADDRESS <<
				ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
		else
			pbd_e1x->global_data = (UNICAST_ADDRESS <<
				ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		data->general.statistics_zero_flg = 1;
		data->general.statistics_en_flg = 1;
		data->general.statistics_counter_id = cli;
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}

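/* Pre-build the iSCSI L2 RX BD ring and RCQ next-page entry and fill in
 * the general and RX sections of the client-init ramrod data.
 */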
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				BNX2_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;
	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->general.func_id = cp->pfid;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;

	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
	data->rx.outer_vlan_removal_enable_flg = 1;
	data->rx.silent_vlan_removal_flg = 1;
	data->rx.silent_vlan_value = 0;
	data->rx.silent_vlan_mask = 0xffff;

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}

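/* Point kcq1 (the iSCSI event queue) and, on E2 and later chips, kcq2
 * (the FCoE event queue) at their producer and status index locations
 * in the status block.
 */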
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = cp->pfid;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
		cp->kcq2.sw_prod_idx = 0;
		cp->kcq2.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
		cp->kcq2.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}
}

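/* Bring up the CNIC side of a bnx2x device: set up the iSCSI (and, on
 * E2+, FCoE) CID tables, initialize the kernel completion queues,
 * program the iSCSI EQ and global buffer addresses in the storm
 * memories, write the context table, and request the IRQ.
 */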
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func, ret;
	u32 pfid;

	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
	cp->port_mode = bp->common.chip_port_mode;
	cp->pfid = bp->pfid;
	cp->func = bp->pf_num;

	func = CNIC_FUNC(cp);
	pfid = cp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid, 0);

	if (ret)
		return -ENOMEM;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
					cp->fcoe_start_cid, 0);

		if (ret)
			return -ENOMEM;
	}

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cnic_init_bnx2x_kcq(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		HC_INDEX_ISCSI_EQ_CONS);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
	return 0;
}

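/* Initialize the L2 rings.  On bnx2x this writes the initial RX
 * producers to USTORM, builds the client-init ramrod data in the UIO
 * buffer, issues a CLIENT_SETUP ramrod, and waits for its completion.
 */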
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i, *cid_ptr;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;
		cid_ptr = udev->l2_buf + 12;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			cid, ETH_CONNECTION_TYPE, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
		*cid_ptr = cid;
	}
}

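/* Tear down the L2 rings.  On bnx2x this halts the iSCSI L2 client with
 * an ETH_HALT ramrod, waits for the completion, then removes the
 * connection with a CFC_DEL ramrod before clearing the ring pages.
 */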
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	void *rx_ring;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			cid, ETH_CONNECTION_TYPE, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				   "iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			cid, NONE_CONNECTION_TYPE, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
	memset(rx_ring, 0, BNX2_PAGE_SIZE);
}

static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

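/* Undo cnic_start_bnx2_hw(): quiesce the interrupt, park the CP and COM
 * processors, clear the kernel queue contexts and the 5709 context
 * table, and free all resources.
 */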
static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
	u32 sb_id = cp->status_blk_num;
	u32 idx_off, syn_off;

	cnic_free_irq(dev);

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		idx_off = offsetof(struct hc_status_block_e2, index_values) +
			  (hc_index * sizeof(u16));

		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
	} else {
		idx_off = offsetof(struct hc_status_block_e1x, index_values) +
			  (hc_index * sizeof(u16));

		syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
	}
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
		  idx_off, 0);

	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev && cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		cp->stop_cm(dev);
		cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

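/* Allocate a cnic_dev and its cnic_local private area as one block and
 * initialize the generic fields and L2 buffer defaults.
 */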
static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL)
		return NULL;

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = (bp->cnic_probe)(dev);
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
	    (pdev->revision < 0x10)) {
		pci_dev_put(pdev);
		goto cnic_err;
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;
	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *ethdev = NULL;

	if (bp->cnic_probe)
		ethdev = bp->cnic_probe(dev);
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (CNIC_SUPPORTS_FCOE(cp)) {
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
		cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
	}

	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
		cp->arm_int = cnic_arm_bnx2x_e2_msix;
	} else {
		cp->ack_int = cnic_ack_bnx2x_msix;
		cp->arm_int = cnic_arm_bnx2x_msix;
	}
	cp->close_conn = cnic_close_bnx2x_conn;
	return cdev;
}

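/* Identify a CNIC-capable netdev by its ethtool driver name ("bnx2" or
 * "bnx2x") and, on a match, create the cnic_dev and add it to the
 * global device list.
 */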
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
			      u16 vlan_id)
{
	int if_type;

	rcu_read_lock();
	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;
		void *ctx;

		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
		if (!ulp_ops || !ulp_ops->indicate_netevent)
			continue;

		ctx = cp->ulp_handle[if_type];

		ulp_ops->indicate_netevent(ctx, event, vlan_id);
	}
	rcu_read_unlock();
}

/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		cnic_rcv_netevent(cp, event, 0);

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	} else {
		struct net_device *realdev;
		u16 vid;

		vid = cnic_get_vlan(netdev, &realdev);
		if (realdev) {
			dev = cnic_from_netdev(realdev);
			if (dev) {
				vid |= VLAN_TAG_PRESENT;
				cnic_rcv_netevent(dev->cnic_priv, event, vid);
				cnic_put(dev);
			}
		}
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

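/* Module unload path: stop, unregister, and free every CNIC device,
 * then free any remaining UIO devices.
 */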
static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc) {
		cnic_release();
		return rc;
	}

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		cnic_release();
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);