/* cnic.c: Broadcom CNIC core network driver.
 *
 * Copyright (c) 2006-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Modified and maintained by: Michael Chan <mchan@broadcom.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"
#define DRV_MODULE_NAME		"cnic"

static char version[] __devinitdata =
	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);
/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}
static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);
static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};
static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}
static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}
static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}
static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}
static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}
static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}
static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}
static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}
static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	ethdev->drv_ctl(dev->netdev, &info);
}
static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}
static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}
static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}
static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}
static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		rcu_read_lock();
		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			rcu_read_unlock();
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, 6);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rcu_read_unlock();
		rc = 0;
	}
	}

	return rc;
}
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}
static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}
static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_clear_bit();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}
int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}
static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);
static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);
static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
	else if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	cnic_ulp_ctl(dev, ulp_type, false);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}
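/* Reserve a specific id.  The id passed in is absolute; the bitmap
 * stores ids relative to id_tbl->start.  Returns -1 if the id is out
 * of range or already in use.
 */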
static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
	}

	spin_unlock(&id_tbl->lock);

	return id;
}
static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}
static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}
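/* Write each page's 64-bit DMA address into the page table as two
 * 32-bit words.  This variant stores the high word first; the _le
 * variant below stores the low word first.
 */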
static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}
static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}
static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    BCM_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
			  ~(BCM_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}
static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}
static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

	pci_dev_put(udev->pdev);
	kfree(udev);
}
static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}
static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = BCM_PAGE_SIZE;
		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   BCM_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}
static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}
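/* Allocate the DMA pages for a kernel completion queue.  On bnx2x the
 * last KCQE slot of each page is reused as a bd_chain_next pointer to
 * the following page, linking the pages into a ring.
 */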
static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}
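/* One cnic_uio_dev is shared per PCI device: reuse an existing entry
 * from cnic_udev_list when possible, otherwise allocate new L2 ring
 * and buffer areas for the userspace driver.
 */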
static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			cp->udev = udev;
			read_unlock(&cnic_dev_lock);
			return 0;
		}
	}
	read_unlock(&cnic_dev_lock);

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;
	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		goto err_udev;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf)
		goto err_dma;

	write_lock(&cnic_dev_lock);
	list_add(&udev->list, &cnic_udev_list);
	write_unlock(&cnic_dev_lock);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;
err_dma:
	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
			  udev->l2_ring, udev->l2_ring_map);
err_udev:
	kfree(udev);
	return -ENOMEM;
}
*dev
)
1056 struct cnic_local
*cp
= dev
->cnic_priv
;
1057 struct cnic_uio_dev
*udev
= cp
->udev
;
1058 struct uio_info
*uinfo
;
1064 uinfo
= &udev
->cnic_uinfo
;
1066 uinfo
->mem
[0].addr
= dev
->netdev
->base_addr
;
1067 uinfo
->mem
[0].internal_addr
= dev
->regview
;
1068 uinfo
->mem
[0].size
= dev
->netdev
->mem_end
- dev
->netdev
->mem_start
;
1069 uinfo
->mem
[0].memtype
= UIO_MEM_PHYS
;
1071 if (test_bit(CNIC_F_BNX2_CLASS
, &dev
->flags
)) {
1072 uinfo
->mem
[1].addr
= (unsigned long) cp
->status_blk
.gen
&
1074 if (cp
->ethdev
->drv_state
& CNIC_DRV_STATE_USING_MSIX
)
1075 uinfo
->mem
[1].size
= BNX2_SBLK_MSIX_ALIGN_SIZE
* 9;
1077 uinfo
->mem
[1].size
= BNX2_SBLK_MSIX_ALIGN_SIZE
;
1079 uinfo
->name
= "bnx2_cnic";
1080 } else if (test_bit(CNIC_F_BNX2X_CLASS
, &dev
->flags
)) {
1081 uinfo
->mem
[1].addr
= (unsigned long) cp
->bnx2x_def_status_blk
&
1083 uinfo
->mem
[1].size
= sizeof(*cp
->bnx2x_def_status_blk
);
1085 uinfo
->name
= "bnx2x_cnic";
1088 uinfo
->mem
[1].memtype
= UIO_MEM_LOGICAL
;
1090 uinfo
->mem
[2].addr
= (unsigned long) udev
->l2_ring
;
1091 uinfo
->mem
[2].size
= udev
->l2_ring_size
;
1092 uinfo
->mem
[2].memtype
= UIO_MEM_LOGICAL
;
1094 uinfo
->mem
[3].addr
= (unsigned long) udev
->l2_buf
;
1095 uinfo
->mem
[3].size
= udev
->l2_buf_size
;
1096 uinfo
->mem
[3].memtype
= UIO_MEM_LOGICAL
;
1098 uinfo
->version
= CNIC_MODULE_VERSION
;
1099 uinfo
->irq
= UIO_IRQ_CUSTOM
;
1101 uinfo
->open
= cnic_uio_open
;
1102 uinfo
->release
= cnic_uio_close
;
1104 if (udev
->uio_dev
== -1) {
1108 ret
= uio_register_device(&udev
->pdev
->dev
, uinfo
);
1111 cnic_init_rings(dev
);
static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->iro_arr = ethdev->iro_arr;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}
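/* Number of free KWQ slots; max_kwq_idx is a power-of-2 minus 1, so
 * the masked producer-consumer difference below is wrap-around safe.
 */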
static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}
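/* Each context owns a CNIC_KWQ16_DATA_SIZE slice of the kwq_16_data_info
 * DMA area; return its kernel address and pass its DMA address back
 * through l5_data for use in a KWQE.
 */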
static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}
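/* Build and submit a single 16-byte slow path entry (SPE): the command
 * and hardware CID are packed into conn_and_cmd_data, the connection
 * type and PF id into the type field.
 */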
static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(cp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return ret;
}
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = cp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}
static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}
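/* Look up the context memory for a cid.  If the chip requires aligned
 * context blocks (ctx_align != 0), skip the unaligned head of the block
 * via align_off.
 */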
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = CNIC_PORT(cp);

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
		ETH_P_8021Q;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
	    cp->port_mode == CHIP_2_PORT_MODE) {

		port = 0;
	}
	ictx->xstorm_st_context.common.flags =
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
	ictx->xstorm_st_context.common.flags =
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;
}
static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}
*dev
, struct kwqe
*kwqe
)
1871 struct cnic_local
*cp
= dev
->cnic_priv
;
1872 struct iscsi_kwqe_conn_update
*req
=
1873 (struct iscsi_kwqe_conn_update
*) kwqe
;
1875 union l5cm_specific_data l5_data
;
1876 u32 l5_cid
, cid
= BNX2X_SW_CID(req
->context_id
);
1879 if (cnic_get_l5_cid(cp
, cid
, &l5_cid
) != 0)
1882 data
= cnic_get_kwqe_16_data(cp
, l5_cid
, &l5_data
);
1886 memcpy(data
, kwqe
, sizeof(struct kwqe
));
1888 ret
= cnic_submit_kwqe_16(dev
, ISCSI_RAMROD_CMD_ID_UPDATE_CONN
,
1889 req
->context_id
, ISCSI_CONNECTION_TYPE
, &l5_data
);
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(cp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return 0;
}
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
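/* The pseudo-header checksum is always computed over IPv6-sized
 * addresses; for IPv4 connections the upper address words are zero and
 * do not change the folded sum, so one helper serves both families.
 */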
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->max_rt_time = 0xffffffff;
}
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
		 mac[0]);
}
static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
}
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "conn_buf size too big\n");
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;
}
static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
	union l5cm_specific_data l5_data;
	int ret;

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->pg_host_opaque;
	kcqe.pg_cid = req->pg_cid;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_stat *req;
	struct fcoe_stat_ramrod_params *fcoe_stat;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	req = (struct fcoe_kwqe_stat *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_stat)
		return -ENOMEM;

	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));

	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret;
	struct cnic_local *cp = dev->cnic_priv;
	u32 cid;
	struct fcoe_init_ramrod_params *fcoe_init;
	struct fcoe_kwqe_init1 *req1;
	struct fcoe_kwqe_init2 *req2;
	struct fcoe_kwqe_init3 *req3;
	union l5cm_specific_data l5_data;

	if (num < 3) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
		*work = 1;
		return -EINVAL;
	}
	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
		*work = 2;
		return -EINVAL;
	}
	*work = 3;

	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_init size too big\n");
		return -ENOMEM;
	}
	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
	if (!fcoe_init)
		return -ENOMEM;

	memset(fcoe_init, 0, sizeof(*fcoe_init));
	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;

	fcoe_init->sb_num = cp->status_blk_num;
	fcoe_init->eq_prod = MAX_KCQ_IDX;
	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
	cp->kcq2.sw_prod_idx = 0;

	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}
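
/* Offload one FCoE connection: allocate connection resources, set up the
 * CDU reserved fields in the context, and hand the four offload KWQEs to
 * the firmware.  Any failure is reported back to the ULP as a KCQE with
 * CTX_ALLOC_FAILURE status.
 */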
static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				 u32 num, int *work)
{
	int ret = 0;
	u32 cid = -1, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kwqe_conn_offload1 *req1;
	struct fcoe_kwqe_conn_offload2 *req2;
	struct fcoe_kwqe_conn_offload3 *req3;
	struct fcoe_kwqe_conn_offload4 *req4;
	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
	struct cnic_context *ctx;
	struct fcoe_context *fctx;
	struct regpair ctx_addr;
	union l5cm_specific_data l5_data;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	if (num < 4) {
		*work = num;
		return -EINVAL;
	}
	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];

	*work = 4;

	l5_cid = req1->fcoe_conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		goto err_reply;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto err_reply;

	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		ret = 0;
		goto err_reply;
	}

	cid = ctx->cid;
	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
	if (fctx) {
		u32 hw_cid = BNX2X_HW_CID(cp, cid);
		u32 val;

		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->xstorm_ag_context.cdu_reserved = val;
		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
					     FCOE_CONNECTION_TYPE);
		fctx->ustorm_ag_context.cdu_usage = val;
	}
	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_offload size too big\n");
		goto err_reply;
	}
	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_offload)
		goto err_reply;

	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));

	cid = BNX2X_HW_CID(cp, cid);
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (!ret)
		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);

	return ret;

err_reply:
	if (cid != -1)
		cnic_free_bnx2x_conn_resc(dev, l5_cid);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}
static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_enable size too big\n");
		return -ENOMEM;
	}
	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_enable)
		return -ENOMEM;

	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_enable_disable *req;
	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
	union l5cm_specific_data l5_data;
	int ret;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;

	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
		netdev_err(dev->netdev, "fcoe_disable size too big\n");
		return -ENOMEM;
	}
	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!fcoe_disable)
		return -ENOMEM;

	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}
static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_conn_destroy *req;
	union l5cm_specific_data l5_data;
	int ret = 0;
	u32 cid, l5_cid;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx;
	struct fcoe_kcqe kcqe;
	struct kcqe *cqes[1];

	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
	cid = req->context_id;
	l5_cid = req->conn_id;
	if (l5_cid >= dev->max_fcoe_conn)
		return -EINVAL;

	l5_cid += BNX2X_FCOE_L5_CID_BASE;

	ctx = &cp->ctx_tbl[l5_cid];

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (ctx->wait_cond)
			kcqe.completion_status = 0;
	}

	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));

	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
	kcqe.fcoe_conn_id = req->conn_id;
	kcqe.fcoe_conn_context_id = cid;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
	return ret;
}
static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 i;

	for (i = start_cid; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];
		int j;

		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			msleep(10);

		for (j = 0; j < 5; j++) {
			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
				break;
			msleep(20);
		}

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			netdev_warn(dev->netdev, "CID %x not deleted\n",
				    ctx->cid);
	}
}
static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct fcoe_kwqe_destroy *req;
	union l5cm_specific_data l5_data;
	struct cnic_local *cp = dev->cnic_priv;
	int ret;
	u32 cid;

	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);

	req = (struct fcoe_kwqe_destroy *) kwqe;
	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);

	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
				  FCOE_CONNECTION_TYPE, &l5_data);
	return ret;
}
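
/* A KWQE that fails with -EIO or -EAGAIN most likely hit a parity error.
 * Fabricate the completion KCQE with a PARITY_ERROR status here so the
 * ULP can clean up and begin reset recovery without waiting for firmware.
 */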
static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kcqe kcqe;
	struct kcqe *cqes[1];
	u32 cid;
	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
	u32 kcqe_op;
	int ulp_type;

	cid = kwqe->kwqe_info0;
	memset(&kcqe, 0, sizeof(kcqe));

	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
		u32 l5_cid = 0;

		ulp_type = CNIC_ULP_FCOE;
		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
			struct fcoe_kwqe_conn_enable_disable *req;

			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
			cid = req->context_id;
			l5_cid = req->conn_id;
		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
		} else {
			return;
		}
		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR;
		kcqe.kcqe_info2 = cid;
		kcqe.kcqe_info0 = l5_cid;

	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
		ulp_type = CNIC_ULP_ISCSI;
		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
			cid = kwqe->kwqe_info1;

		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR;
		kcqe.kcqe_info2 = cid;
		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);

	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;

		ulp_type = CNIC_ULP_L4;
		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		else
			return;

		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
				    KCQE_FLAGS_LAYER_MASK_L4;
		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR;
		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
	} else {
		return;
	}

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
}
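
/* Dispatch a batch of iSCSI/L4 KWQEs.  Multi-KWQE commands (OFFLOAD_CONN1
 * and CONNECT1) consume several entries and report the count via *work.
 */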
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
					 struct kwqe *wqes[], u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0) {
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			/* Possibly bnx2x parity error, send completion
			 * to ulp drivers with error code to speed up
			 * cleanup and reset recovery.
			 */
			if (ret == -EIO || ret == -EAGAIN)
				cnic_bnx2x_kwqe_err(dev, kwqe);
		}
		i += work;
	}
	return 0;
}
static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
					struct kwqe *wqes[], u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
		return -EINVAL;

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case FCOE_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
						    num_wqes - i, &work);
			break;
		case FCOE_KWQE_OPCODE_ENABLE_CONN:
			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DISABLE_CONN:
			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_DESTROY:
			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
			break;
		case FCOE_KWQE_OPCODE_STAT:
			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
			break;
		default:
			ret = 0;
			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
				   opcode);
			break;
		}
		if (ret < 0) {
			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
				   opcode);

			/* Possibly bnx2x parity error, send completion
			 * to ulp drivers with error code to speed up
			 * cleanup and reset recovery.
			 */
			if (ret == -EIO || ret == -EAGAIN)
				cnic_bnx2x_kwqe_err(dev, kwqe);
		}
		i += work;
	}
	return 0;
}
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int ret = -EINVAL;
	u32 layer_code;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */

	if (!num_wqes)
		return 0;

	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
	switch (layer_code) {
	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
	case KWQE_FLAGS_LAYER_MASK_L4:
	case KWQE_FLAGS_LAYER_MASK_L2:
		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
		break;

	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
		break;
	}
	return ret;
}
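
/* FCOE_RAMROD_CMD_ID_TERMINATE_CONN overlaps the L4 KCQE opcode space, so
 * fold it into the L4 layer mask here; cnic_cm_process_kcqe() sorts it out.
 */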
static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
{
	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
		return KCQE_FLAGS_LAYER_MASK_L4;

	return opflag & KCQE_FLAGS_LAYER_MASK;
}
static void service_kcqes(struct cnic_dev *dev, int num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i, j, comp = 0;

	i = 0;
	j = 1;
	while (num_cqes) {
		struct cnic_ulp_ops *ulp_ops;
		int ulp_type;
		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);

		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
			comp++;

		while (j < num_cqes) {
			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;

			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
				break;

			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
				comp++;
			j++;
		}

		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
			ulp_type = CNIC_ULP_RDMA;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
			ulp_type = CNIC_ULP_ISCSI;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
			ulp_type = CNIC_ULP_FCOE;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
			ulp_type = CNIC_ULP_L4;
		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
			goto end;
		else {
			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
				   kcqe_op_flag);
			goto end;
		}

		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
		if (likely(ulp_ops)) {
			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
						cp->completed_kcq + i, j);
		}
		rcu_read_unlock();
end:
		num_cqes -= j;
		i += j;
		j = 1;
	}
	if (unlikely(comp))
		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
}
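
/* Gather completed KCQEs into cp->completed_kcq[].  Entries flagged with
 * KCQE_FLAGS_NEXT belong to a larger event and are only counted once the
 * final (unflagged) entry has been seen.
 */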
static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
{
	struct cnic_local *cp = dev->cnic_priv;
	u16 i, ri, hw_prod, last;
	struct kcqe *kcqe;
	int kcqe_cnt = 0, last_cnt = 0;

	i = ri = last = info->sw_prod_idx;
	ri &= MAX_KCQ_IDX;
	hw_prod = *info->hw_prod_idx_ptr;
	hw_prod = info->hw_idx(hw_prod);

	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
		cp->completed_kcq[kcqe_cnt++] = kcqe;
		i = info->next_idx(i);
		ri = i & MAX_KCQ_IDX;
		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
			last_cnt = kcqe_cnt;
			last = i;
		}
	}

	info->sw_prod_idx = last;
	return last_cnt;
}
static int cnic_l2_completion(struct cnic_local *cp)
{
	u16 hw_cons, sw_cons;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	u32 cmd;
	int comp = 0;

	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
		return 0;

	hw_cons = *cp->rx_cons_ptr;
	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
		hw_cons++;

	sw_cons = cp->rx_cons;
	while (sw_cons != hw_cons) {
		u8 cqe_fp_flags;

		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
			    cmd == RAMROD_CMD_ID_ETH_HALT)
				comp = 1;
		}
		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
	}
	return comp;
}
static void cnic_chk_pkt_rings(struct cnic_local *cp)
{
	u16 rx_cons, tx_cons;
	int comp = 0;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	rx_cons = *cp->rx_cons_ptr;
	tx_cons = *cp->tx_cons_ptr;
	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			comp = cnic_l2_completion(cp);

		cp->tx_cons = tx_cons;
		cp->rx_cons = rx_cons;

		if (cp->udev)
			uio_event_notify(&cp->udev->cnic_uinfo);
	}
	if (comp)
		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
}
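
/* bnx2 (1G NetXtreme II) KCQ service path. */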
static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading other fields */
	rmb();
	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;

	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that status_blk fields can change. */
		barrier();

		status_idx = (u16) *cp->kcq1.status_idx_ptr;
		/* status block index must be read first */
		rmb();
		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
	}

	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);

	cnic_chk_pkt_rings(cp);

	return status_idx;
}

static int cnic_service_bnx2(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		struct status_block *sblk = status_blk;

		return sblk->status_idx;
	}

	return cnic_service_bnx2_queues(dev);
}

static void cnic_service_bnx2_msix(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;

	cp->last_status_idx = cnic_service_bnx2_queues(dev);

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}
static void cnic_doirq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;

		prefetch(cp->status_blk.gen);
		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

		tasklet_schedule(&cp->cnic_irq_task);
	}
}

static irqreturn_t cnic_irq(int irq, void *dev_instance)
{
	struct cnic_dev *dev = dev_instance;
	struct cnic_local *cp = dev->cnic_priv;

	if (cp->ack_int)
		cp->ack_int(dev);

	cnic_doirq(dev);

	return IRQ_HANDLED;
}
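
/* Status block acknowledgment helpers for bnx2x: the HC-based ack used by
 * 57710/57711 and the IGU-based ack used by E2 and later chips.
 */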
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}

static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
			    u16 index, u8 op, u8 update)
{
	struct igu_regular cmd_data;
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;

	cmd_data.sb_id_and_flags =
		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
		(update << IGU_REGULAR_BUPDATE_SHIFT) |
		(op << IGU_REGULAR_ENABLE_INT_SHIFT);

	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
}

static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}

static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
			IGU_INT_DISABLE, 0);
}
static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
{
	u32 last_status = *info->status_idx_ptr;
	int kcqe_cnt;

	/* status block index must be read before reading the KCQ */
	rmb();
	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();

		last_status = *info->status_idx_ptr;
		/* status block index must be read before reading the KCQ */
		rmb();
	}
	return last_status;
}

static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	u32 status_idx, new_status_idx;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	while (1) {
		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);

		CNIC_WR16(dev, cp->kcq1.io_addr,
			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);

		if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
			cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
					   status_idx, IGU_INT_ENABLE, 1);
			break;
		}

		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);

		if (new_status_idx != status_idx)
			continue;

		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
			  MAX_KCQ_IDX);

		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
				status_idx, IGU_INT_ENABLE, 1);

		break;
	}
}

static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;

	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		cnic_doirq(dev);

	cnic_chk_pkt_rings(cp);

	return 0;
}
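
/* ULP (bnx2i/bnx2fc) start/stop plumbing.  cnic_lock and the
 * ULP_F_CALL_PENDING bit serialize these calls against ULP unregistration.
 */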
static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
{
	struct cnic_ulp_ops *ulp_ops;

	if (if_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
					    lockdep_is_held(&cnic_lock));
	if (!ulp_ops) {
		mutex_unlock(&cnic_lock);
		return;
	}
	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	mutex_unlock(&cnic_lock);

	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);

	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}

static void cnic_ulp_stop(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
		cnic_ulp_stop_one(cp, if_type);
}

static void cnic_ulp_start(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int if_type;

	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
						    lockdep_is_held(&cnic_lock));
		if (!ulp_ops || !ulp_ops->cnic_start) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
			ulp_ops->cnic_start(cp->ulp_handle[if_type]);

		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
	}
}

static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;
	int rc;

	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (ulp_ops && ulp_ops->cnic_get_stats)
		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
	else
		rc = -ENODEV;
	mutex_unlock(&cnic_lock);
	return rc;
}
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
	struct cnic_dev *dev = data;
	int ulp_type = CNIC_ULP_ISCSI;

	switch (info->cmd) {
	case CNIC_CTL_STOP_CMD:
		cnic_hold(dev);

		cnic_ulp_stop(dev);
		cnic_stop_hw(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_START_CMD:
		cnic_hold(dev);

		if (!cnic_start_hw(dev))
			cnic_ulp_start(dev);

		cnic_put(dev);
		break;
	case CNIC_CTL_STOP_ISCSI_CMD: {
		struct cnic_local *cp = dev->cnic_priv;
		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
		break;
	}
	case CNIC_CTL_COMPLETION_CMD: {
		struct cnic_ctl_completion *comp = &info->data.comp;
		u32 cid = BNX2X_SW_CID(comp->cid);
		u32 l5_cid;
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
			break;

		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

			if (unlikely(comp->error)) {
				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
				netdev_err(dev->netdev,
					   "CID %x CFC delete comp error %x\n",
					   cid, comp->error);
			}

			ctx->wait_cond = 1;
			wake_up(&ctx->waitq);
		}
		break;
	}
	case CNIC_CTL_FCOE_STATS_GET_CMD:
		ulp_type = CNIC_ULP_FCOE;
		/* fall through */
	case CNIC_CTL_ISCSI_STATS_GET_CMD:
		cnic_hold(dev);
		cnic_copy_ulp_stats(dev, ulp_type);
		cnic_put(dev);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void cnic_ulp_init(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_init) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_init(dev);

		ulp_put(ulp_ops);
	}
}

static void cnic_ulp_exit(struct cnic_dev *dev)
{
	int i;
	struct cnic_local *cp = dev->cnic_priv;

	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
		struct cnic_ulp_ops *ulp_ops;

		mutex_lock(&cnic_lock);
		ulp_ops = cnic_ulp_tbl_prot(i);
		if (!ulp_ops || !ulp_ops->cnic_exit) {
			mutex_unlock(&cnic_lock);
			continue;
		}
		ulp_get(ulp_ops);
		mutex_unlock(&cnic_lock);

		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
			ulp_ops->cnic_exit(dev);

		ulp_put(ulp_ops);
	}
}
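
/* L4 connection manager.  The cnic_cm_* functions below build PG and TCP
 * connect/close/abort KWQEs on behalf of the ULP drivers.
 */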
static int cnic_cm_offload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_offload_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
	l4kwqe->l2hdr_nbytes = ETH_HLEN;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->sa0 = dev->mac_addr[0];
	l4kwqe->sa1 = dev->mac_addr[1];
	l4kwqe->sa2 = dev->mac_addr[2];
	l4kwqe->sa3 = dev->mac_addr[3];
	l4kwqe->sa4 = dev->mac_addr[4];
	l4kwqe->sa5 = dev->mac_addr[5];

	l4kwqe->etype = ETH_P_IP;
	l4kwqe->ipid_start = DEF_IPID_START;
	l4kwqe->host_opaque = csk->l5_cid;

	if (csk->vlan_id) {
		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
		l4kwqe->vlan_tag = csk->vlan_id;
		l4kwqe->l2hdr_nbytes += 4;
	}

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_update_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_update_pg *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
	l4kwqe->pg_cid = csk->pg_cid;

	l4kwqe->da0 = csk->ha[0];
	l4kwqe->da1 = csk->ha[1];
	l4kwqe->da2 = csk->ha[2];
	l4kwqe->da3 = csk->ha[3];
	l4kwqe->da4 = csk->ha[4];
	l4kwqe->da5 = csk->ha[5];

	l4kwqe->pg_host_opaque = csk->l5_cid;
	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_upload_pg(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_upload *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
	l4kwqe->flags =
		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->pg_cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
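
/* Build the TCP CONNECT request.  IPv4 uses two linked KWQEs (req1 and
 * req3); IPv6 adds req2 carrying the upper address words, three in all.
 */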
static int cnic_cm_conn_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_connect_req1 *l4kwqe1;
	struct l4_kwq_connect_req2 *l4kwqe2;
	struct l4_kwq_connect_req3 *l4kwqe3;
	struct kwqe *wqes[3];
	u8 tcp_flags = 0;
	int num_wqes = 2;

	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
	memset(l4kwqe3, 0, sizeof(*l4kwqe3));

	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
	l4kwqe3->flags =
		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
	l4kwqe3->ka_timeout = csk->ka_timeout;
	l4kwqe3->ka_interval = csk->ka_interval;
	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
	l4kwqe3->tos = csk->tos;
	l4kwqe3->ttl = csk->ttl;
	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
	l4kwqe3->pmtu = csk->mtu;
	l4kwqe3->rcv_buf = csk->rcv_buf;
	l4kwqe3->snd_buf = csk->snd_buf;
	l4kwqe3->seed = csk->seed;

	wqes[0] = (struct kwqe *) l4kwqe1;
	if (test_bit(SK_F_IPV6, &csk->flags)) {
		wqes[1] = (struct kwqe *) l4kwqe2;
		wqes[2] = (struct kwqe *) l4kwqe3;
		num_wqes = 3;

		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
		l4kwqe2->flags =
			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
			       sizeof(struct tcphdr);
	} else {
		wqes[1] = (struct kwqe *) l4kwqe3;
		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
			       sizeof(struct tcphdr);
	}

	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
	l4kwqe1->flags =
		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
	l4kwqe1->cid = csk->cid;
	l4kwqe1->pg_cid = csk->pg_cid;
	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
	if (csk->tcp_flags & SK_TCP_NAGLE)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
	if (csk->tcp_flags & SK_TCP_SACK)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;

	l4kwqe1->tcp_flags = tcp_flags;

	return dev->submit_kwqes(dev, wqes, num_wqes);
}
static int cnic_cm_close_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_close_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}

static int cnic_cm_abort_req(struct cnic_sock *csk)
{
	struct cnic_dev *dev = csk->dev;
	struct l4_kwq_reset_req *l4kwqe;
	struct kwqe *wqes[1];

	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
	memset(l4kwqe, 0, sizeof(*l4kwqe));
	wqes[0] = (struct kwqe *) l4kwqe;

	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
	l4kwqe->cid = csk->cid;

	return dev->submit_kwqes(dev, wqes, 1);
}
static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
			  u32 l5_cid, struct cnic_sock **csk, void *context)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_sock *csk1;

	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return -EINVAL;

	if (cp->ctx_tbl) {
		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
			return -EAGAIN;
	}

	csk1 = &cp->csk_tbl[l5_cid];
	if (atomic_read(&csk1->ref_count))
		return -EAGAIN;

	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
		return -EBUSY;

	csk1->dev = dev;
	csk1->cid = cid;
	csk1->l5_cid = l5_cid;
	csk1->ulp_type = ulp_type;
	csk1->context = context;

	csk1->ka_timeout = DEF_KA_TIMEOUT;
	csk1->ka_interval = DEF_KA_INTERVAL;
	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
	csk1->tos = DEF_TOS;
	csk1->ttl = DEF_TTL;
	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
	csk1->rcv_buf = DEF_RCV_BUF;
	csk1->snd_buf = DEF_SND_BUF;
	csk1->seed = DEF_SEED;

	*csk = csk1;
	return 0;
}

static void cnic_cm_cleanup(struct cnic_sock *csk)
{
	if (csk->src_port) {
		struct cnic_dev *dev = csk->dev;
		struct cnic_local *cp = dev->cnic_priv;

		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
		csk->src_port = 0;
	}
}

static void cnic_close_conn(struct cnic_sock *csk)
{
	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
		cnic_cm_upload_pg(csk);
		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	}
	cnic_cm_cleanup(csk);
}

static int cnic_cm_destroy(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	csk_hold(csk);
	clear_bit(SK_F_INUSE, &csk->flags);
	smp_mb__after_clear_bit();
	while (atomic_read(&csk->ref_count) != 1)
		msleep(1);
	cnic_cm_cleanup(csk);

	csk->flags = 0;
	csk_put(csk);
	return 0;
}
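
/* Route resolution helpers.  Only a dst that maps back to our own netdev
 * (possibly through a VLAN) is used for the connection's MTU and VLAN id.
 */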
static inline u16 cnic_get_vlan(struct net_device *dev,
				struct net_device **vlan_dev)
{
	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		*vlan_dev = vlan_dev_real_dev(dev);
		return vlan_dev_vlan_id(dev);
	}
	*vlan_dev = dev;
	return 0;
}

static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_INET)
	struct rtable *rt;

	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
	if (!IS_ERR(rt)) {
		*dst = &rt->dst;
		return 0;
	}
	return PTR_ERR(rt);
#else
	return -ENETUNREACH;
#endif
}

static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
			     struct dst_entry **dst)
{
#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_addr->sin6_addr;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = dst_addr->sin6_scope_id;

	*dst = ip6_route_output(&init_net, NULL, &fl6);
	if ((*dst)->error) {
		dst_release(*dst);
		*dst = NULL;
		return -ENETUNREACH;
	} else
		return 0;
#endif

	return -ENETUNREACH;
}

static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
					   int ulp_type)
{
	struct cnic_dev *dev = NULL;
	struct dst_entry *dst;
	struct net_device *netdev = NULL;
	int err = -ENETUNREACH;

	if (dst_addr->sin_family == AF_INET)
		err = cnic_get_v4_route(dst_addr, &dst);
	else if (dst_addr->sin_family == AF_INET6) {
		struct sockaddr_in6 *dst_addr6 =
			(struct sockaddr_in6 *) dst_addr;

		err = cnic_get_v6_route(dst_addr6, &dst);
	} else
		return NULL;

	if (err)
		return NULL;

	if (!dst->dev)
		goto done;

	cnic_get_vlan(dst->dev, &netdev);

	dev = cnic_from_netdev(netdev);

done:
	dst_release(dst);
	if (dev)
		cnic_put(dev);
	return dev;
}

static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
}
static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	int is_v6, rc = 0;
	struct dst_entry *dst = NULL;
	struct net_device *realdev;
	__be16 local_port;
	u32 port_id;

	if (saddr->local.v6.sin6_family == AF_INET6 &&
	    saddr->remote.v6.sin6_family == AF_INET6)
		is_v6 = 1;
	else if (saddr->local.v4.sin_family == AF_INET &&
		 saddr->remote.v4.sin_family == AF_INET)
		is_v6 = 0;
	else
		return -EINVAL;

	clear_bit(SK_F_IPV6, &csk->flags);

	if (is_v6) {
		set_bit(SK_F_IPV6, &csk->flags);
		cnic_get_v6_route(&saddr->remote.v6, &dst);

		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
		       sizeof(struct in6_addr));
		csk->dst_port = saddr->remote.v6.sin6_port;
		local_port = saddr->local.v6.sin6_port;

	} else {
		cnic_get_v4_route(&saddr->remote.v4, &dst);

		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
		csk->dst_port = saddr->remote.v4.sin_port;
		local_port = saddr->local.v4.sin_port;
	}

	csk->vlan_id = 0;
	csk->mtu = dev->netdev->mtu;
	if (dst && dst->dev) {
		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
		if (realdev == dev->netdev) {
			csk->vlan_id = vlan;
			csk->mtu = dst_mtu(dst);
		}
	}

	port_id = be16_to_cpu(local_port);
	if (port_id >= CNIC_LOCAL_PORT_MIN &&
	    port_id < CNIC_LOCAL_PORT_MAX) {
		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
			port_id = 0;
	} else
		port_id = 0;

	if (!port_id) {
		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
		if (port_id == -1) {
			rc = -ENOMEM;
			goto err_out;
		}
		local_port = cpu_to_be16(port_id);
	}
	csk->src_port = local_port;

err_out:
	dst_release(dst);
	return rc;
}

static void cnic_init_csk_state(struct cnic_sock *csk)
{
	csk->state = 0;
	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
	clear_bit(SK_F_CLOSING, &csk->flags);
}
static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	int err = 0;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return -EOPNOTSUPP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
		return -EINVAL;

	cnic_init_csk_state(csk);

	err = cnic_get_route(csk, saddr);
	if (err)
		goto err_out;

	err = cnic_resolve_addr(csk, saddr);
	if (!err)
		return 0;

err_out:
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	return err;
}

static int cnic_cm_abort(struct cnic_sock *csk)
{
	struct cnic_local *cp = csk->dev->cnic_priv;
	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;

	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_abort_prep(csk))
		return cnic_cm_abort_req(csk);

	/* Getting here means that we haven't started connect, or
	 * connect was not successful.
	 */

	cp->close_conn(csk, opcode);
	if (csk->state != opcode)
		return -EALREADY;

	return 0;
}

static int cnic_cm_close(struct cnic_sock *csk)
{
	if (!cnic_in_use(csk))
		return -EINVAL;

	if (cnic_close_prep(csk)) {
		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
		return cnic_cm_close_req(csk);
	} else {
		return -EALREADY;
	}
	return 0;
}
*cp
, struct cnic_sock
*csk
,
3833 struct cnic_ulp_ops
*ulp_ops
;
3834 int ulp_type
= csk
->ulp_type
;
3837 ulp_ops
= rcu_dereference(cp
->ulp_ops
[ulp_type
]);
3839 if (opcode
== L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE
)
3840 ulp_ops
->cm_connect_complete(csk
);
3841 else if (opcode
== L4_KCQE_OPCODE_VALUE_CLOSE_COMP
)
3842 ulp_ops
->cm_close_complete(csk
);
3843 else if (opcode
== L4_KCQE_OPCODE_VALUE_RESET_RECEIVED
)
3844 ulp_ops
->cm_remote_abort(csk
);
3845 else if (opcode
== L4_KCQE_OPCODE_VALUE_RESET_COMP
)
3846 ulp_ops
->cm_abort_complete(csk
);
3847 else if (opcode
== L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED
)
3848 ulp_ops
->cm_remote_close(csk
);
3853 static int cnic_cm_set_pg(struct cnic_sock
*csk
)
3855 if (cnic_offld_prep(csk
)) {
3856 if (test_bit(SK_F_PG_OFFLD_COMPLETE
, &csk
->flags
))
3857 cnic_cm_update_pg(csk
);
3859 cnic_cm_offload_pg(csk
);
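
/* KCQE handlers for the connection manager: PG completions, FCoE
 * terminate notifications and the L4 TCP state events.
 */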
static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 l5_cid = kcqe->pg_host_opaque;
	u8 opcode = kcqe->op_code;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];

	csk_hold(csk);
	if (!cnic_in_use(csk))
		goto done;

	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		goto done;
	}
	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk,
			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		goto done;
	}

	csk->pg_cid = kcqe->pg_cid;
	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
	cnic_cm_conn_req(csk);

done:
	csk_put(csk);
}

static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	ctx->timestamp = jiffies;
	ctx->wait_cond = 1;
	wake_up(&ctx->waitq);
}
static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
	u8 opcode = l4kcqe->op_code;
	u32 l5_cid;
	struct cnic_sock *csk;

	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
		cnic_process_fcoe_term_conn(dev, kcqe);
		return;
	}
	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
		cnic_cm_process_offld_pg(dev, l4kcqe);
		return;
	}

	l5_cid = l4kcqe->conn_id;
	if (opcode & 0x80)
		l5_cid = l4kcqe->cid;
	if (l5_cid >= MAX_CM_SK_TBL_SZ)
		return;

	csk = &cp->csk_tbl[l5_cid];
	csk_hold(csk);

	if (!cnic_in_use(csk)) {
		csk_put(csk);
		return;
	}

	switch (opcode) {
	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
		if (l4kcqe->status != 0) {
			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
			cnic_cm_upcall(cp, csk,
				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
		}
		break;
	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
		if (l4kcqe->status == 0)
			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
		else if (l4kcqe->status ==
			 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
			set_bit(SK_F_HW_ERR, &csk->flags);

		smp_mb__before_clear_bit();
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		cnic_cm_upcall(cp, csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR)
			set_bit(SK_F_HW_ERR, &csk->flags);

		cp->close_conn(csk, opcode);
		break;

	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
		/* after we already sent CLOSE_REQ */
		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
		else
			cnic_cm_upcall(cp, csk, opcode);
		break;
	}
	csk_put(csk);
}

static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
{
	struct cnic_dev *dev = data;
	int i;

	for (i = 0; i < num; i++)
		cnic_cm_process_kcqe(dev, kcqe[i]);
}
static struct cnic_ulp_ops cm_ulp_ops = {
	.indicate_kcqes		= cnic_cm_indicate_kcqe,
};

static void cnic_cm_free_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	kfree(cp->csk_tbl);
	cp->csk_tbl = NULL;
	cnic_free_id_tbl(&cp->csk_port_tbl);
}

static int cnic_cm_alloc_mem(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 port_id;

	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
			      GFP_KERNEL);
	if (!cp->csk_tbl)
		return -ENOMEM;

	port_id = random32();
	port_id %= CNIC_LOCAL_PORT_RANGE;
	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
			     CNIC_LOCAL_PORT_MIN, port_id)) {
		cnic_cm_free_mem(dev);
		return -ENOMEM;
	}
	return 0;
}
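
/* Close-path state check: promotes an unsolicited RESET completion to
 * RESET_RECEIVED and guards the one-time SK_F_CLOSING transition.
 */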
static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
{
	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		/* Unsolicited RESET_COMP or RESET_RECEIVED */
		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
		csk->state = opcode;
	}

	/* 1. If event opcode matches the expected event in csk->state
	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
	 *    event
	 * 3. If the expected event is 0, meaning the connection was never
	 *    established, we accept the opcode from cm_abort.
	 */
	if (opcode == csk->state || csk->state == 0 ||
	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
			if (csk->state == 0)
				csk->state = opcode;
			return 1;
		}
	}
	return 0;
}
static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;

	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
		cnic_cm_upcall(cp, csk, opcode);
		return;
	}

	clear_bit(SK_F_CONNECT_START, &csk->flags);
	cnic_close_conn(csk);
	csk->state = opcode;
	cnic_cm_upcall(cp, csk, opcode);
}

static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
{
}

static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
	u32 seed;

	seed = random32();
	cnic_ctx_wr(dev, 45, 0, seed);
	return 0;
}
static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode)) {
			if (test_bit(SK_F_HW_ERR, &csk->flags))
				close_complete = 1;
			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
			else
				close_complete = 1;
		}
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}
static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (!cp->ctx_tbl)
		return;

	if (!netif_running(dev->netdev))
		return;

	cnic_bnx2x_delete_wait(dev, 0);

	cancel_delayed_work(&cp->delete_task);
	flush_workqueue(cnic_wq);

	if (atomic_read(&cp->iscsi_conn) != 0)
		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
			    atomic_read(&cp->iscsi_conn));
}

static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;
	u32 port = CNIC_PORT(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
		DEF_MAX_CWND);
	return 0;
}
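
/* Deferred cleanup: walk the context table and issue the destroy ramrods
 * for connections whose 2-second quiesce period has expired, rescheduling
 * while work remains.
 */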
static void cnic_delete_task(struct work_struct *work)
{
	struct cnic_local *cp;
	struct cnic_dev *dev;
	u32 i;
	int need_resched = 0;

	cp = container_of(work, struct cnic_local, delete_task.work);
	dev = cp->dev;

	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
		struct drv_ctl_info info;

		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);

		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
		cp->ethdev->drv_ctl(dev->netdev, &info);
	}

	for (i = 0; i < cp->max_cid_space; i++) {
		struct cnic_context *ctx = &cp->ctx_tbl[i];
		int err;

		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
			need_resched = 1;
			continue;
		}

		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
			continue;

		err = cnic_bnx2x_destroy_ramrod(dev, i);

		cnic_free_bnx2x_conn_resc(dev, i);
		if (!err) {
			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
				atomic_dec(&cp->iscsi_conn);

			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
		}
	}

	if (need_resched)
		queue_delayed_work(cnic_wq, &cp->delete_task,
				   msecs_to_jiffies(10));
}
static int cnic_cm_open(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int err;

	err = cnic_cm_alloc_mem(dev);
	if (err)
		return err;

	err = cp->start_cm(dev);

	if (err)
		goto err_out;

	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);

	dev->cm_create = cnic_cm_create;
	dev->cm_destroy = cnic_cm_destroy;
	dev->cm_connect = cnic_cm_connect;
	dev->cm_abort = cnic_cm_abort;
	dev->cm_close = cnic_cm_close;
	dev->cm_select_dev = cnic_cm_select_dev;

	cp->ulp_handle[CNIC_ULP_L4] = dev;
	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
	return 0;

err_out:
	cnic_cm_free_mem(dev);
	return err;
}

static int cnic_cm_shutdown(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	if (!cp->csk_tbl)
		return 0;

	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
		struct cnic_sock *csk = &cp->csk_tbl[i];

		clear_bit(SK_F_INUSE, &csk->flags);
		cnic_cm_cleanup(csk);
	}
	cnic_cm_free_mem(dev);

	return 0;
}
static void cnic_init_context(struct cnic_dev *dev, u32 cid)
{
	u32 cid_addr;
	int i;

	cid_addr = GET_CID_ADDR(cid);

	for (i = 0; i < CTX_SIZE; i += 4)
		cnic_ctx_wr(dev, cid_addr, i, 0);
}

static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret = 0, i;
	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;

	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		return 0;

	for (i = 0; i < cp->ctx_blks; i++) {
		int j;
		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
		u32 val;

		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);

		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
			(u64) cp->ctx_arr[i].mapping >> 32);
		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
static void cnic_free_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		cp->disable_int_sync(dev);
		tasklet_kill(&cp->cnic_irq_task);
		free_irq(ethdev->irq_arr[0].vector, dev);
	}
}

static int cnic_request_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
	if (err)
		tasklet_disable(&cp->cnic_irq_task);

	return err;
}
static int cnic_init_bnx2_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		int i = 0;
		int sblk_num = cp->status_blk_num;
		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);

		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
			     (unsigned long) dev);
		err = cnic_request_irq(dev);
		if (err)
			return err;

		while (cp->status_blk.bnx2->status_completion_producer_index &&
		       i < 10) {
			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
				1 << (11 + sblk_num));
			udelay(10);
			i++;
			barrier();
		}
		if (cp->status_blk.bnx2->status_completion_producer_index) {
			cnic_free_irq(dev);
			goto failed;
		}

	} else {
		struct status_block *sblk = cp->status_blk.gen;
		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
		int i = 0;

		while (sblk->status_completion_producer_index && i < 10) {
			CNIC_WR(dev, BNX2_HC_COMMAND,
				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
			udelay(10);
			i++;
			barrier();
		}
		if (sblk->status_completion_producer_index)
			goto failed;

	}
	return 0;

failed:
	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
	return -EBUSY;
}
static void cnic_enable_bnx2_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
}

static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
		return;

	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
	synchronize_irq(ethdev->irq_arr[0].vector);
}
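
/* Setup of the L2 TX/RX rings exported to userspace through the UIO
 * device (bnx2 devices).
 */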
static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, tx_cid, sb_id;
	u32 val, offset0, offset1, offset2, offset3;
	int i;
	struct tx_bd *txbd;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct status_block *s_blk = cp->status_blk.gen;

	sb_id = cp->status_blk_num;
	tx_cid = 20;
	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		tx_cid = TX_TSS_CID + sb_id - 1;
		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
			(TX_TSS_CID << 7));
		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
	}
	cp->tx_cons = *cp->tx_cons_ptr;

	cid_addr = GET_CID_ADDR(tx_cid);
	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;

		for (i = 0; i < PHY_CTX_SIZE; i += 4)
			cnic_ctx_wr(dev, cid_addr2, i, 0);

		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		cnic_init_context(dev, tx_cid);
		cnic_init_context(dev, tx_cid + 1);

		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	cnic_ctx_wr(dev, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	cnic_ctx_wr(dev, cid_addr, offset1, val);

	txbd = udev->l2_ring;

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) ring_map >> 32;
	cnic_ctx_wr(dev, cid_addr, offset2, val);
	txbd->tx_bd_haddr_hi = val;

	val = (u64) ring_map & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, offset3, val);
	txbd->tx_bd_haddr_lo = val;
}
static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct cnic_uio_dev *udev = cp->udev;
	u32 cid_addr, sb_id, val, coal_reg, coal_val;
	int i;
	struct rx_bd *rxbd;
	struct status_block *s_blk = cp->status_blk.gen;
	dma_addr_t ring_map = udev->l2_ring_map;

	sb_id = cp->status_blk_num;
	cnic_init_context(dev, 2);
	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
	coal_reg = BNX2_HC_COMMAND;
	coal_val = CNIC_RD(dev, coal_reg);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *sblk = cp->status_blk.bnx2;

		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
		coal_reg = BNX2_HC_COALESCE_NOW;
		coal_val = 1 << (11 + sb_id);
	}
	i = 0;
	while (!(*cp->rx_cons_ptr != 0) && i < 10) {
		CNIC_WR(dev, coal_reg, coal_val);
		udelay(10);
		i++;
		barrier();
	}
	cp->rx_cons = *cp->rx_cons_ptr;

	cid_addr = GET_CID_ADDR(2);
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);

	if (sb_id == 0)
		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
	else
		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);

	rxbd = udev->l2_ring + BCM_PAGE_SIZE;
	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->rx_bd_len = cp->l2_single_buf_size;
		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
	}
	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
	rxbd->rx_bd_haddr_hi = val;

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
	rxbd->rx_bd_haddr_lo = val;

	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
}

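/* Quiesce the Rx side by submitting an L2 FLUSH KWQE through the
 * regular bnx2 kernel work queue before the ring is torn down.
 */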
static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
{
	struct kwqe *wqes[1], l2kwqe;

	memset(&l2kwqe, 0, sizeof(l2kwqe));
	wqes[0] = &l2kwqe;
	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
			       KWQE_OPCODE_SHIFT) | 2;
	dev->submit_kwqes(dev, wqes, 1);
}

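/* Read the iSCSI MAC address assigned to this PCI function out of bnx2
 * shared memory and program it into the EMAC perfect-match registers,
 * then enable the RPM sort rules so the chip's receive-path sorter
 * will match it.
 */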
static void cnic_set_bnx2_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 val;

	val = cp->func << 2;

	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);

	val = cnic_reg_rd_ind(dev, cp->shmem_base +
			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);

	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
	if (CHIP_NUM(cp) != CHIP_NUM_5709)
		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;

	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
}

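/* Bring-up sequence for the bnx2 (5706/5708/5709) chips: program the
 * iSCSI MAC, size the MQ kernel-bypass block to the host page size,
 * set host-coalescing thresholds, build the KWQ/KCQ contexts, init the
 * L2 rings, and finally hook up the IRQ.  The CP and COM doorbells are
 * only rung once the kernel queue contexts are valid.
 */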
static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct status_block *sblk = cp->status_blk.gen;
	u32 val, kcq_cid_addr, kwq_cid_addr;
	int err;

	cnic_set_bnx2_mac(dev);

	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	if (BCM_PAGE_BITS > 12)
		val |= (12 - 8) << 4;
	else
		val |= (BCM_PAGE_BITS - 8) << 4;

	CNIC_WR(dev, BNX2_MQ_CONFIG, val);

	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);

	err = cnic_setup_5709_context(dev, 1);
	if (err)
		return err;

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->max_kwq_idx = MAX_KWQ_IDX;
	cp->kwq_prod_idx = 0;
	cp->kwq_con_idx = 0;
	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
	else
		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;

	/* Initialize the kernel work queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kwq_info.pgtbl_map;
	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;

	cp->kcq1.sw_prod_idx = 0;
	cp->kcq1.hw_prod_idx_ptr =
		&sblk->status_completion_producer_index;

	cp->kcq1.status_idx_ptr = &sblk->status_idx;

	/* Initialize the kernel complete queue context. */
	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);

	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);

	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);

	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);

	val = (u32) cp->kcq1.dma.pgtbl_map;
	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);

	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		struct status_block_msix *msblk = cp->status_blk.bnx2;
		u32 sb_id = cp->status_blk_num;
		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);

		cp->kcq1.hw_prod_idx_ptr =
			&msblk->status_completion_producer_index;
		cp->kcq1.status_idx_ptr = &msblk->status_idx;
		cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
	}

	/* Enable Command Scheduler notification when we write to the
	 * host producer index of the kernel contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);

	/* Enable Command Scheduler notification when we write to either
	 * the Send Queue or Receive Queue producer indexes of the kernel
	 * bypass contexts. */
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);

	/* Notify COM when the driver posts an application buffer. */
	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);

	/* Set the CP and COM doorbells.  These two processors poll the
	 * doorbell for a non-zero value before running.  This must be done
	 * after setting up the kernel queue contexts. */
	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);

	cnic_init_bnx2_tx_ring(dev);
	cnic_init_bnx2_rx_ring(dev);

	err = cnic_init_bnx2_irq(dev);
	if (err) {
		netdev_err(dev->netdev, "cnic_init_irq failed\n");
		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
		return err;
	}

	return 0;
}

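/* Point the chip's context table at our host context pages.  When the
 * hardware requires aligned context memory (cp->ctx_align), round each
 * DMA address up to the next alignment boundary; the allocator is
 * assumed to have left room for this adjustment.
 */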
static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_offset = ethdev->ctx_tbl_offset;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		struct cnic_ctx *ctx = &cp->ctx_arr[i];
		dma_addr_t map = ctx->mapping;

		if (cp->ctx_align) {
			unsigned long mask = cp->ctx_align - 1;

			map = (map + mask) & ~mask;
		}

		cnic_ctx_tbl_wr(dev, start_offset + i, map);
	}
}

static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
		err = cnic_request_irq(dev);

	return err;
}

static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
						u16 sb_id, u8 sb_index,
						u8 disable)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = CNIC_RD16(dev, addr);

	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
		  HC_INDEX_DATA_HC_ENABLED);
	CNIC_WR16(dev, addr, flags);
}

static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;

	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
			offsetof(struct hc_status_block_data_e1x, index_data) +
			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
			offsetof(struct hc_index_data, timeout), 64 / 4);
	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
}

static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}

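/* Build the bnx2x L2 Tx ring for the UIO client.  Each dummy frame is
 * described by three BDs (a start BD, a zeroed middle BD, and a
 * regular BD) covering 16 bytes of the shared buffer, so the ring is
 * pre-formatted and userspace only has to bump the producer index.
 * The client_init ramrod data for CLIENT_SETUP is filled in alongside.
 */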
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = udev->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
	}

	val = (u64) ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);

	val = (u64) ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;

	/* reset xstorm per client statistics */
	if (cli < MAX_STAT_COUNTER_ID) {
		data->general.statistics_zero_flg = 1;
		data->general.statistics_en_flg = 1;
		data->general.statistics_counter_id = cli;
	}

	cp->tx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
}

static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
				    struct client_init_ramrod_data *data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
	int i;
	u32 cli = cp->ethdev->iscsi_l2_client_id;
	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
	u32 val;
	dma_addr_t ring_map = udev->l2_ring_map;

	/* General data */
	data->general.client_id = cli;
	data->general.activate_flg = 1;
	data->general.sp_client_id = cli;
	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
	data->general.func_id = cp->pfid;

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}

	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);
	data->rx.bd_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);
	data->rx.bd_page_base.lo = cpu_to_le32(val);

	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);
	data->rx.cqe_page_base.hi = cpu_to_le32(val);

	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);
	data->rx.cqe_page_base.lo = cpu_to_le32(val);

	/* Other ramrod params */
	data->rx.client_qzone_id = cl_qzone_id;
	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
	data->rx.status_block_id = BNX2X_DEF_SB_ID;

	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;

	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
	data->rx.outer_vlan_removal_enable_flg = 1;
	data->rx.silent_vlan_removal_flg = 1;
	data->rx.silent_vlan_value = 0;
	data->rx.silent_vlan_mask = 0xffff;

	cp->rx_cons_ptr =
		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
	cp->rx_cons = *cp->rx_cons_ptr;
}

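/* Bind the kernel completion queue(s) to their status block indices:
 * iSCSI EQ completions are reported through the CSTORM producer, and
 * on E2 and later chips a second KCQ is wired up for the FCoE event
 * queue through USTORM.
 */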
static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 pfid = cp->pfid;

	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
	cp->kcq1.sw_prod_idx = 0;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	} else {
		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;

		cp->kcq1.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
		cp->kcq1.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;

		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
		cp->kcq2.sw_prod_idx = 0;
		cp->kcq2.hw_prod_idx_ptr =
			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
		cp->kcq2.status_idx_ptr =
			&sb->sb.running_index[SM_RX_ID];
	}
}

static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int func = CNIC_FUNC(cp), ret;
	u32 pfid;

	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
	cp->port_mode = CHIP_PORT_MODE_NONE;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);

		if (!(val & 1))
			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;

		if (val) {
			cp->port_mode = CHIP_4_PORT_MODE;
			cp->pfid = func >> 1;
		} else {
			cp->port_mode = CHIP_2_PORT_MODE;
			cp->pfid = func & 0x6;
		}
	} else {
		cp->pfid = func;
	}
	pfid = cp->pfid;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       cp->iscsi_start_cid, 0);

	if (ret)
		return -ENOMEM;

	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
					cp->fcoe_start_cid, 0);

		if (ret)
			return -ENOMEM;
	}

	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;

	cnic_init_bnx2x_kcq(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
		HC_INDEX_ISCSI_EQ_CONS);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);

	cnic_setup_bnx2x_context(dev);

	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	return 0;
}

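/* Bring up the L2 rings used by the UIO interface.  On bnx2 this is a
 * direct context write; on bnx2x a CLIENT_SETUP ramrod must be posted
 * and given a short window (~10 ms here) for the completion handler to
 * clear CNIC_LCL_FL_L2_WAIT before the ring doorbell is enabled.
 */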
static void cnic_init_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		u32 cl_qzone_id;
		struct client_init_ramrod_data *data;
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		u32 off, i, *cid_ptr;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		barrier();

		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);

		off = BAR_USTRORM_INTMEM +
			(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		data = udev->l2_buf;
		cid_ptr = udev->l2_buf + 12;

		memset(data, 0, sizeof(*data));

		cnic_init_bnx2x_tx_ring(dev, data);
		cnic_init_bnx2x_rx_ring(dev, data);

		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;

		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);

		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			cid, ETH_CONNECTION_TYPE, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_SETUP did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
		cnic_ring_ctl(dev, cid, cli, 1);
		*cid_ptr = cid;
	}
}

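/* Tear down in reverse order: stop the ring doorbell, HALT the client,
 * then send CFC_DEL to release the connection.  Each ramrod gets a
 * short window to complete; a timeout is logged but shutdown proceeds
 * so the device can still be freed.
 */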
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	void *rx_ring;

	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
		return;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		u32 cli = cp->ethdev->iscsi_l2_client_id;
		u32 cid = cp->ethdev->iscsi_l2_cid;
		union l5cm_specific_data l5_data;
		int i;

		cnic_ring_ctl(dev, cid, cli, 0);

		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
			cid, ETH_CONNECTION_TYPE, &l5_data);

		i = 0;
		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
		       ++i < 10)
			msleep(1);

		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
			netdev_err(dev->netdev,
				"iSCSI CLIENT_HALT did not complete\n");
		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);

		memset(&l5_data, 0, sizeof(l5_data));
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
			cid, NONE_CONNECTION_TYPE, &l5_data);
		msleep(10);
	}
	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
	rx_ring = udev->l2_ring + BCM_PAGE_SIZE;
	memset(rx_ring, 0, BCM_PAGE_SIZE);
}

static int cnic_register_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (!ethdev)
		return -ENODEV;

	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
		return 0;

	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
	if (err)
		netdev_err(dev->netdev, "register_cnic failed\n");

	return err;
}

static void cnic_unregister_netdev(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;

	if (!ethdev)
		return;

	ethdev->drv_unregister_cnic(dev->netdev);
}

static int cnic_start_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err;

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EALREADY;

	dev->regview = ethdev->io_base;
	pci_dev_get(dev->pcidev);
	cp->func = PCI_FUNC(dev->pcidev->devfn);
	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;

	err = cp->alloc_resc(dev);
	if (err) {
		netdev_err(dev->netdev, "allocate resource failure\n");
		goto err1;
	}

	err = cp->start_hw(dev);
	if (err)
		goto err1;

	err = cnic_cm_open(dev);
	if (err)
		goto err1;

	set_bit(CNIC_F_CNIC_UP, &dev->flags);

	cp->enable_int(dev);

	return 0;

err1:
	cp->free_resc(dev);
	pci_dev_put(dev->pcidev);
	return err;
}

static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
{
	cnic_disable_bnx2_int_sync(dev);

	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);

	cnic_init_context(dev, KWQ_CID);
	cnic_init_context(dev, KCQ_CID);

	cnic_setup_5709_context(dev, 0);
	cnic_free_irq(dev);

	cnic_free_resc(dev);
}

static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_free_irq(dev);
	*cp->kcq1.hw_prod_idx_ptr = 0;
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
	cnic_free_resc(dev);
}

static void cnic_stop_hw(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		int i = 0;

		/* Need to wait for the ring shutdown event to complete
		 * before clearing the CNIC_UP flag.
		 */
		while (cp->udev->uio_dev != -1 && i < 15) {
			msleep(100);
			i++;
		}
		cnic_shutdown_rings(dev);
		cp->stop_cm(dev);
		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
		synchronize_rcu();
		cnic_cm_shutdown(dev);
		cp->stop_hw(dev);
		pci_dev_put(dev->pcidev);
	}
}

static void cnic_free_dev(struct cnic_dev *dev)
{
	int i = 0;

	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
		msleep(100);
		i++;
	}
	if (atomic_read(&dev->ref_count) != 0)
		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");

	netdev_info(dev->netdev, "Removed CNIC device\n");
	dev_put(dev->netdev);
	kfree(dev);
}

static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
				       struct pci_dev *pdev)
{
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	int alloc_size;

	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);

	cdev = kzalloc(alloc_size, GFP_KERNEL);
	if (cdev == NULL) {
		netdev_err(dev, "allocate dev struct failure\n");
		return NULL;
	}

	cdev->netdev = dev;
	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
	cdev->register_device = cnic_register_device;
	cdev->unregister_device = cnic_unregister_device;
	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;

	cp = cdev->cnic_priv;
	cp->dev = cdev;
	cp->l2_single_buf_size = 0x400;
	cp->l2_rx_ring_size = 3;

	spin_lock_init(&cp->cnic_ulp_lock);

	netdev_info(dev, "Added CNIC device\n");

	return cdev;
}

static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	pci_dev_get(pdev);
	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
	    (pdev->revision < 0x10)) {
		pci_dev_put(pdev);
		goto cnic_err;
	}
	pci_dev_put(pdev);

	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL)
		goto cnic_err;

	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;

	cp->cnic_ops = &cnic_bnx2_ops;
	cp->start_hw = cnic_start_bnx2_hw;
	cp->stop_hw = cnic_stop_bnx2_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl;
	cp->alloc_resc = cnic_alloc_bnx2_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2_hw;
	cp->stop_cm = cnic_cm_stop_bnx2_hw;
	cp->enable_int = cnic_enable_bnx2_int;
	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
	cp->close_conn = cnic_close_bnx2_conn;

	return cdev;

cnic_err:
	dev_put(dev);
	return NULL;
}

static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;
	cp->chip_id = ethdev->chip_id;

	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;

	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
	    !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;

	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;

	memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
		cp->ack_int = cnic_ack_bnx2x_e2_msix;
	else
		cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;

	return cdev;
}

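/* Identify CNIC-capable netdevs by their ethtool driver name.  This is
 * how hot-plugged bnx2/bnx2x devices are picked up from the netdev
 * notifier without creating a hard module dependency (the probe entry
 * points are looked up with symbol_get() above).
 */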
static struct cnic_dev *is_cnic_dev(struct net_device *dev)
{
	struct ethtool_drvinfo drvinfo;
	struct cnic_dev *cdev = NULL;

	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);

		if (!strcmp(drvinfo.driver, "bnx2"))
			cdev = init_bnx2_cnic(dev);
		if (!strcmp(drvinfo.driver, "bnx2x"))
			cdev = init_bnx2x_cnic(dev);
		if (cdev) {
			write_lock(&cnic_dev_lock);
			list_add(&cdev->list, &cnic_dev_list);
			write_unlock(&cnic_dev_lock);
		}
	}
	return cdev;
}

static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
			      u16 vlan_id)
{
	int if_type;

	rcu_read_lock();
	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
		struct cnic_ulp_ops *ulp_ops;
		void *ctx;

		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
		if (!ulp_ops || !ulp_ops->indicate_netevent)
			continue;

		ctx = cp->ulp_handle[if_type];

		ulp_ops->indicate_netevent(ctx, event, vlan_id);
	}
	rcu_read_unlock();
}

/**
 * netdev event handler
 */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
							 void *ptr)
{
	struct net_device *netdev = ptr;
	struct cnic_dev *dev;
	int new_dev = 0;

	dev = cnic_from_netdev(netdev);

	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
		/* Check for the hot-plug device */
		dev = is_cnic_dev(netdev);
		if (dev) {
			new_dev = 1;
			cnic_hold(dev);
		}
	}
	if (dev) {
		struct cnic_local *cp = dev->cnic_priv;

		if (new_dev)
			cnic_ulp_init(dev);
		else if (event == NETDEV_UNREGISTER)
			cnic_ulp_exit(dev);

		if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
			if (cnic_register_netdev(dev) != 0) {
				cnic_put(dev);
				goto done;
			}
			if (!cnic_start_hw(dev))
				cnic_ulp_start(dev);
		}

		cnic_rcv_netevent(cp, event, 0);

		if (event == NETDEV_GOING_DOWN) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
			cnic_unregister_netdev(dev);
		} else if (event == NETDEV_UNREGISTER) {
			write_lock(&cnic_dev_lock);
			list_del_init(&dev->list);
			write_unlock(&cnic_dev_lock);

			cnic_put(dev);
			cnic_free_dev(dev);
			goto done;
		}
		cnic_put(dev);
	} else {
		struct net_device *realdev;
		u16 vid;

		vid = cnic_get_vlan(netdev, &realdev);
		if (realdev) {
			dev = cnic_from_netdev(realdev);
			if (dev) {
				vid |= VLAN_TAG_PRESENT;
				cnic_rcv_netevent(dev->cnic_priv, event, vid);
				cnic_put(dev);
			}
		}
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block cnic_netdev_notifier = {
	.notifier_call = cnic_netdev_event
};

static void cnic_release(void)
{
	struct cnic_dev *dev;
	struct cnic_uio_dev *udev;

	while (!list_empty(&cnic_dev_list)) {
		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
			cnic_ulp_stop(dev);
			cnic_stop_hw(dev);
		}

		cnic_ulp_exit(dev);
		cnic_unregister_netdev(dev);
		list_del_init(&dev->list);
		cnic_free_dev(dev);
	}
	while (!list_empty(&cnic_udev_list)) {
		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
				  list);
		cnic_free_uio(udev);
	}
}

static int __init cnic_init(void)
{
	int rc = 0;

	pr_info("%s", version);

	rc = register_netdevice_notifier(&cnic_netdev_notifier);
	if (rc)
		return rc;

	cnic_wq = create_singlethread_workqueue("cnic_wq");
	if (!cnic_wq) {
		unregister_netdevice_notifier(&cnic_netdev_notifier);
		return -ENOMEM;
	}

	return 0;
}

static void __exit cnic_exit(void)
{
	unregister_netdevice_notifier(&cnic_netdev_notifier);
	cnic_release();
	destroy_workqueue(cnic_wq);
}

module_init(cnic_init);
module_exit(cnic_exit);