/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"
static char version[] =
		BNXT_RE_DESC " v" ROCE_DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");
static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_async_notifier = NULL,
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
	.ulp_sriov_config = bnxt_re_sriov_config
};
/* RoCE -> Net driver */

/* Driver registration routines used to let the networking driver (bnxt_en)
 * know that the RoCE driver is now installed
 */
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;
	/* Acquire rtnl lock if it is not invoked from a netdev event */
	if (lock_wait)
		rtnl_lock();

	rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
						    BNXT_ROCE_ULP);
	if (lock_wait)
		rtnl_unlock();
	return rc;
}
static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rtnl_lock();
	rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
						  &bnxt_re_ulp_ops, rdev);
	rtnl_unlock();
	return rc;
}
static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	if (lock_wait)
		rtnl_lock();

	rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

	if (lock_wait)
		rtnl_unlock();
	return rc;
}
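
/* Ask bnxt_en for a share of its MSI-X vectors; the RoCE driver only
 * proceeds if at least BNXT_RE_MIN_MSIX vectors are granted.
 */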
static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
	struct bnxt_en_dev *en_dev;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

	rtnl_lock();
	num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
							 rdev->msix_entries,
							 num_msix_want);
	rtnl_unlock();
	if (num_msix_got < BNXT_RE_MIN_MSIX) {
		rc = -EINVAL;
		goto done;
	}
	if (num_msix_got != num_msix_want) {
		dev_warn(rdev_to_dev(rdev),
			 "Requested %d MSI-X vectors, got %d\n",
			 num_msix_want, num_msix_got);
	}
	rdev->num_msix = num_msix_got;
done:
	return rc;
}
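
/* The helpers below build and send HWRM messages to the firmware through
 * the bnxt_en driver (bnxt_send_fw_msg): header initialization, message
 * fill, and ring/stats-context allocation and free requests.
 */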
static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
				  u16 opcd, u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}
static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
				 bool lock_wait)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg;
	bool do_unlock = false;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	if (lock_wait) {
		rtnl_lock();
		do_unlock = true;
	}

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
	req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to free HW ring:%d :%#x", req.ring_id, rc);
	if (do_unlock)
		rtnl_unlock();
	return rc;
}
static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
				  int pages, int type, u32 ring_mask,
				  u32 map_index, u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	rtnl_lock();
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
	req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
	if (pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);
	req.length = cpu_to_le32(ring_mask + 1);
	req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
	req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	rtnl_unlock();
	return rc;
}
static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id, bool lock_wait)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {0};
	struct bnxt_fw_msg fw_msg;
	bool do_unlock = false;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	if (lock_wait) {
		rtnl_lock();
		do_unlock = true;
	}

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req,
			    sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to free HW stats context %#x", rc);

	if (do_unlock)
		rtnl_unlock();
	return rc;
}
static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
				       dma_addr_t dma_map,
				       u32 *fw_stats_ctx_id)
{
	struct hwrm_stat_ctx_alloc_output resp = {0};
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	rtnl_lock();

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	rtnl_unlock();
	return rc;
}
static bool is_bnxt_re_dev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

		if (strcmp(drvinfo.driver, "bnxt_en"))
			return false;
		return true;
	}
	return false;
}
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct bnxt_re_dev *rdev;

	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) {
		if (rdev->netdev == netdev) {
			rcu_read_unlock();
			return rdev;
		}
	}
	rcu_read_unlock();
	return NULL;
}
static void bnxt_re_dev_unprobe(struct net_device *netdev,
				struct bnxt_en_dev *en_dev)
{
	dev_put(netdev);
	module_put(en_dev->pdev->driver->driver.owner);
}
static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
	struct bnxt *bp = netdev_priv(netdev);
	struct bnxt_en_dev *en_dev;
	struct pci_dev *pdev;

	/* Call bnxt_en's RoCE probe via indirect API */
	if (!bp->ulp_probe)
		return ERR_PTR(-EINVAL);

	en_dev = bp->ulp_probe(netdev);
	if (IS_ERR(en_dev))
		return en_dev;

	pdev = en_dev->pdev;
	if (!pdev)
		return ERR_PTR(-EINVAL);

	if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
		dev_info(&pdev->dev,
			 "%s: probe error: RoCE is not supported on this device",
			 ROCE_DRV_MODULE_NAME);
		return ERR_PTR(-ENODEV);
	}

	/* Bump net device reference count */
	if (!try_module_get(pdev->driver->driver.owner))
		return ERR_PTR(-ENODEV);

	dev_hold(netdev);

	return en_dev;
}
static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev)
{
	ib_unregister_device(&rdev->ibdev);
}
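
/* Populate the ib_device structure with the device attributes, the
 * supported user verbs mask and the kernel verb callbacks, then register
 * it with the RDMA core.
 */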
static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;

	/* ib device init */
	ibdev->owner = THIS_MODULE;
	ibdev->node_type = RDMA_NODE_IB_CA;
	strlcpy(ibdev->name, "bnxt_re%d", IB_DEVICE_NAME_MAX);
	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);

	ibdev->num_comp_vectors	= 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	/* User space verbs */
	ibdev->uverbs_abi_ver = BNXT_RE_ABI_VERSION;
	ibdev->uverbs_cmd_mask =
			(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
			(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
			(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
			(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
			(1ull << IB_USER_VERBS_CMD_REG_MR)		|
			(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
			(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
			(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
			(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
			(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
			(1ull << IB_USER_VERBS_CMD_MODIFY_AH)		|
			(1ull << IB_USER_VERBS_CMD_QUERY_AH)		|
			(1ull << IB_USER_VERBS_CMD_DESTROY_AH);
	/* POLL_CQ and REQ_NOTIFY_CQ are handled directly in libbnxt_re */

	/* Kernel verbs */
	ibdev->query_device		= bnxt_re_query_device;
	ibdev->modify_device		= bnxt_re_modify_device;

	ibdev->query_port		= bnxt_re_query_port;
	ibdev->get_port_immutable	= bnxt_re_get_port_immutable;
	ibdev->query_pkey		= bnxt_re_query_pkey;
	ibdev->query_gid		= bnxt_re_query_gid;
	ibdev->get_netdev		= bnxt_re_get_netdev;
	ibdev->add_gid			= bnxt_re_add_gid;
	ibdev->del_gid			= bnxt_re_del_gid;
	ibdev->get_link_layer		= bnxt_re_get_link_layer;

	ibdev->alloc_pd			= bnxt_re_alloc_pd;
	ibdev->dealloc_pd		= bnxt_re_dealloc_pd;

	ibdev->create_ah		= bnxt_re_create_ah;
	ibdev->modify_ah		= bnxt_re_modify_ah;
	ibdev->query_ah			= bnxt_re_query_ah;
	ibdev->destroy_ah		= bnxt_re_destroy_ah;

	ibdev->create_qp		= bnxt_re_create_qp;
	ibdev->modify_qp		= bnxt_re_modify_qp;
	ibdev->query_qp			= bnxt_re_query_qp;
	ibdev->destroy_qp		= bnxt_re_destroy_qp;

	ibdev->post_send		= bnxt_re_post_send;
	ibdev->post_recv		= bnxt_re_post_recv;

	ibdev->create_cq		= bnxt_re_create_cq;
	ibdev->destroy_cq		= bnxt_re_destroy_cq;
	ibdev->poll_cq			= bnxt_re_poll_cq;
	ibdev->req_notify_cq		= bnxt_re_req_notify_cq;

	ibdev->get_dma_mr		= bnxt_re_get_dma_mr;
	ibdev->dereg_mr			= bnxt_re_dereg_mr;
	ibdev->alloc_mr			= bnxt_re_alloc_mr;
	ibdev->map_mr_sg		= bnxt_re_map_mr_sg;

	ibdev->reg_user_mr		= bnxt_re_reg_user_mr;
	ibdev->alloc_ucontext		= bnxt_re_alloc_ucontext;
	ibdev->dealloc_ucontext		= bnxt_re_dealloc_ucontext;
	ibdev->mmap			= bnxt_re_mmap;
	ibdev->get_hw_stats		= bnxt_re_ib_get_hw_stats;
	ibdev->alloc_hw_stats		= bnxt_re_ib_alloc_hw_stats;

	return ib_register_device(ibdev, NULL);
}
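
/* sysfs attributes exposing the PCI vendor id, firmware version and HCA
 * description of the registered device.
 */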
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->dev_attr.fw_ver);
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL);
static DEVICE_ATTR(fw_rev, 0444, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, 0444, show_hca, NULL);

static struct device_attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_rev,
	&dev_attr_hca_type
};
static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
	dev_put(rdev->netdev);
	rdev->netdev = NULL;

	mutex_lock(&bnxt_re_dev_lock);
	list_del_rcu(&rdev->list);
	mutex_unlock(&bnxt_re_dev_lock);

	synchronize_rcu();
	flush_workqueue(bnxt_re_wq);

	ib_dealloc_device(&rdev->ibdev);
	/* rdev is gone */
}
static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

	/* Allocate bnxt_re_dev instance here */
	rdev = (struct bnxt_re_dev *)ib_alloc_device(sizeof(*rdev));
	if (!rdev) {
		dev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			ROCE_DRV_MODULE_NAME);
		return NULL;
	}
	/* Default values */
	rdev->netdev = netdev;
	dev_hold(rdev->netdev);
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	atomic_set(&rdev->qp_count, 0);
	atomic_set(&rdev->cq_count, 0);
	atomic_set(&rdev->srq_count, 0);
	atomic_set(&rdev->mr_count, 0);
	atomic_set(&rdev->mw_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	mutex_lock(&bnxt_re_dev_lock);
	list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
	mutex_unlock(&bnxt_re_dev_lock);

	return rdev;
}
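
/* Handlers invoked by the qplib layer: asynchronous function events
 * reported on the CREQ and completion notifications reported on an NQ.
 */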
static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       struct creq_func_event *aeqe)
{
	switch (aeqe->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (!cq) {
		dev_err(NULL, "%s: CQ is NULL, CQN not handled",
			ROCE_DRV_MODULE_NAME);
		return -EINVAL;
	}
	if (cq->ib_cq.comp_handler) {
		/* Lock comp_handler? */
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	if (rdev->nq[0].hwq.max_elements) {
		for (i = 1; i < rdev->num_msix; i++)
			bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
	}

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}
static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int rc = 0, i;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->msix_entries[i].vector,
					  rdev->msix_entries[i].db_offset,
					  &bnxt_re_cqn_handler, NULL);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to enable NQ with rc = 0x%x", rc);
			return rc;
		}
	}
	return 0;
}
static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait)
{
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
}
static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
{
	bnxt_re_free_nq_res(rdev, lock_wait);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}
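
/* Allocate the qplib resources for this device: device attributes, the
 * privileged doorbell page (DPI), and one NQ plus its firmware ring per
 * completion vector.
 */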
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	int rc = 0, i;

	/* Configure and allocate resources for qplib */
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
				  &rdev->dpi_privileged,
				  rdev);
	if (rc)
		goto free_res;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		rdev->nq[i].hwq.max_elements = BNXT_RE_MAX_CQ_COUNT +
			BNXT_RE_MAX_SRQC_COUNT + 2;
		rc = bnxt_qplib_alloc_nq(rdev->en_dev->pdev, &rdev->nq[i]);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
				i, rc);
			goto dealloc_dpi;
		}
		rc = bnxt_re_net_ring_alloc
			(rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr,
			 rdev->nq[i].hwq.pbl[rdev->nq[i].hwq.level].pg_count,
			 HWRM_RING_ALLOC_CMPL,
			 BNXT_QPLIB_NQE_MAX_CNT - 1,
			 rdev->msix_entries[i + 1].ring_idx,
			 &rdev->nq[i].ring_id);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to allocate NQ fw id with rc = 0x%x",
				rc);
			goto free_nq;
		}
	}
	return 0;

free_nq:
	for (i = 0; i < rdev->num_msix - 1; i++)
		bnxt_qplib_free_nq(&rdev->nq[i]);
dealloc_dpi:
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->qplib_res.dpi_tbl,
			       &rdev->dpi_privileged);
free_res:
	bnxt_qplib_free_res(&rdev->qplib_res);
fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}
static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp)
		ib_event.element.qp = qp;
	else
		ib_event.element.port_num = port_num;
	ib_event.event = event;
	ib_dispatch_event(&ib_event);
}
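
/* Query the priority to CoS queue mapping from the firmware so that RoCE
 * traffic can be steered to the CoS queues programmed by the L2 driver.
 */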
#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN	0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
				      u64 *cid_map)
{
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	struct bnxt *bp = netdev_priv(rdev->netdev);
	struct hwrm_queue_pri2cos_qcfg_output resp;
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	u32 flags = 0;
	u8 *qcfgmap, *tmp_map;
	int rc = 0, i;

	if (!cid_map)
		return -EINVAL;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	flags |= (dir & 0x01);
	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req.flags = cpu_to_le32(flags);
	req.port_id = bp->pf.port_id;

	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		return rc;

	if (resp.queue_cfg_info) {
		dev_warn(rdev_to_dev(rdev),
			 "Asymmetric cos queue configuration detected");
		dev_warn(rdev_to_dev(rdev),
			 " on device, QoS may not be fully functional\n");
	}
	qcfgmap = &resp.pri0_cos_queue_id;
	tmp_map = (u8 *)cid_map;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		tmp_map[i] = qcfgmap[i];

	return rc;
}
static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) || (qp == rdev->qp1_sqp);
}
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
		/* Modify the state of all QPs except QP1/Shadow QP */
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}
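
/* Re-program the non-VLAN GIDs so that their priority VLAN tagging setting
 * matches the current qplib_res.prio state.
 */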
static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
		return 0;

	if (!sgid_tbl) {
		dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
		return -EINVAL;
	}

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* Need to modify the VLAN enable setting of non-VLAN GIDs only
		 * as the setting is done for VLAN GIDs while adding the GID.
		 */
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}

	return rc;
}
static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app;

	netdev = rdev->netdev;

	memset(&app, 0, sizeof(app));
	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}
static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
	u16 prio;
	u8 id;

	for (prio = 0, id = 0; prio < 8; prio++) {
		if (prio_map & (1 << prio)) {
			cosq[id] = cid_map[prio];
			id++;
			if (id == 2) /* Max 2 tcs supported */
				break;
		}
	}
}
static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;
	u64 cid_map;
	int rc;

	/* Get priority for roce */
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Get cosq id for this priority */
	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
	if (rc) {
		dev_warn(rdev_to_dev(rdev), "no cos for p_mask %x\n", prio_map);
		return rc;
	}
	/* Parse CoS IDs for app priority */
	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
	if (rc) {
		dev_warn(rdev_to_dev(rdev), "no tc for cos{%x, %x}\n",
			 rdev->cosq[0], rdev->cosq[1]);
		return rc;
	}

	/* Actual priorities are not programmed as they are already
	 * done by the L2 driver; just enable or disable priority vlan tagging
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map ? true : false;

		bnxt_re_update_gid(rdev);
	}

	return 0;
}
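
/* Tear down in the reverse order of bnxt_re_ib_reg(): IB device and sysfs
 * files first, then the QoS worker, qplib resources, RCFW channel and ring,
 * MSI-X vectors and finally the netdev registration.
 */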
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
{
	int i, rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
		for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++)
			device_remove_file(&rdev->ibdev.dev,
					   bnxt_re_attributes[i]);
		bnxt_re_unregister_ib(rdev);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work(&rdev->worker);

	bnxt_re_cleanup_res(rdev);
	bnxt_re_free_res(rdev, lock_wait);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id,
					   lock_wait);
		bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
		rc = bnxt_re_free_msix(rdev, lock_wait);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to free MSI-X vectors: %#x", rc);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
		rc = bnxt_re_unregister_netdev(rdev, lock_wait);
		if (rc)
			dev_warn(rdev_to_dev(rdev),
				 "Failed to unregister with netdev: %#x", rc);
	}
}
static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 i;

	rdev->qplib_ctx.qpc_count = BNXT_RE_MAX_QPC_COUNT;
	rdev->qplib_ctx.mrw_count = BNXT_RE_MAX_MRW_COUNT;
	rdev->qplib_ctx.srqc_count = BNXT_RE_MAX_SRQC_COUNT;
	rdev->qplib_ctx.cq_count = BNXT_RE_MAX_CQ_COUNT;
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		rdev->qplib_ctx.tqm_count[i] =
			rdev->dev_attr.tqm_alloc_reqs[i];
}
/* Worker thread for polling periodic events. Currently used for QoS programming. */
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
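
/* Bring-up sequence for a new RoCE device: register with bnxt_en, request
 * MSI-X, set up the RCFW channel and context memory, allocate and
 * initialize qplib resources, configure QoS and finally register the IB
 * device.
 */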
static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
{
	int i, j, rc;

	/* Register the new RoCE device instance with the netdev driver */
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		pr_err("Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	rc = bnxt_re_request_msix(rdev);
	if (rc) {
		pr_err("Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
	rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc)
		goto fail;

	rc = bnxt_re_net_ring_alloc
			(rdev, rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr,
			 rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count,
			 HWRM_RING_ALLOC_CMPL, BNXT_QPLIB_CREQE_MAX_CNT - 1,
			 rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx,
			 &rdev->rcfw.creq_ring_id);
	if (rc) {
		pr_err("Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	rc = bnxt_qplib_enable_rcfw_channel
			(rdev->en_dev->pdev, &rdev->rcfw,
			 rdev->msix_entries[BNXT_RE_AEQ_IDX].vector,
			 rdev->msix_entries[BNXT_RE_AEQ_IDX].db_offset,
			 0, &bnxt_re_aeq_handler);
	if (rc) {
		pr_err("Failed to enable RCFW channel: %#x\n", rc);
		goto free_ring;
	}

	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
	if (rc)
		goto disable_rcfw;
	bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx, 0);
	if (rc) {
		pr_err("Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		pr_err("Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx, 0);
	if (rc) {
		pr_err("Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	/* Resources based on the 'new' device caps */
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		pr_err("Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		pr_err("Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	rc = bnxt_re_setup_qos(rdev);
	if (rc)
		pr_info("RoCE priority not yet configured\n");

	INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
	set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));

	/* Register ib dev */
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		goto fail;
	}
	dev_info(rdev_to_dev(rdev), "Device registered successfully");
	for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
		rc = device_create_file(&rdev->ibdev.dev,
					bnxt_re_attributes[i]);
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Failed to create IB sysfs: %#x", rc);
			/* Must clean up all created device files */
			for (j = 0; j < i; j++)
				device_remove_file(&rdev->ibdev.dev,
						   bnxt_re_attributes[j]);
			bnxt_re_unregister_ib(rdev);
			goto fail;
		}
	}
	set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
			 &rdev->active_width);
	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);

	return 0;

free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true);
free_ctx:
	bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	bnxt_re_ib_unreg(rdev, true);
	return rc;
}
static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct net_device *netdev = rdev->netdev;

	bnxt_re_dev_remove(rdev);

	if (netdev)
		bnxt_re_dev_unprobe(netdev, en_dev);
}
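
/* Pair a netdev with its bnxt_en device and allocate a bnxt_re_dev
 * instance for it; undone by bnxt_re_dev_unreg().
 */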
static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!is_bnxt_re_dev(netdev))
		return -ENODEV;

	en_dev = bnxt_re_dev_probe(netdev);
	if (IS_ERR(en_dev)) {
		if (en_dev != ERR_PTR(-ENODEV))
			pr_err("%s: Failed to probe\n", ROCE_DRV_MODULE_NAME);
		return PTR_ERR(en_dev);
	}
	*rdev = bnxt_re_dev_add(netdev, en_dev);
	if (!*rdev) {
		rc = -ENOMEM;
		bnxt_re_dev_unprobe(netdev, en_dev);
	}
	return rc;
}
static void bnxt_re_remove_one(struct bnxt_re_dev *rdev)
{
	pci_dev_put(rdev->en_dev->pdev);
}
/* Handle all deferred netevent tasks */
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	if (re_work->event != NETDEV_REGISTER &&
	    !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
		return;

	switch (re_work->event) {
	case NETDEV_REGISTER:
		rc = bnxt_re_ib_reg(rdev);
		if (rc)
			dev_err(rdev_to_dev(rdev),
				"Failed to register with IB: %#x", rc);
		break;
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else if (netif_carrier_ok(rdev->netdev))
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	smp_mb__before_atomic();
	clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
	kfree(re_work);
}
static void bnxt_re_init_one(struct bnxt_re_dev *rdev)
{
	pci_dev_get(rdev->en_dev->pdev);
}
1277 * "Notifier chain callback can be invoked for the same chain from
1278 * different CPUs at the same time".
1280 * For cases when the netdev is already present, our call to the
1281 * register_netdevice_notifier() will actually get the rtnl_lock()
1282 * before sending NETDEV_REGISTER and (if up) NETDEV_UP
1285 * But for cases when the netdev is not already present, the notifier
1286 * chain is subjected to be invoked from different CPUs simultaneously.
1288 * This is protected by the netdev_mutex.
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;
	bool sch_work = false;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev && event != NETDEV_REGISTER)
		goto exit;
	if (real_dev != netdev)
		goto exit;

	switch (event) {
	case NETDEV_REGISTER:
		if (rdev)
			break;
		rc = bnxt_re_dev_reg(&rdev, real_dev);
		if (rc == -ENODEV)
			break;
		if (rc) {
			pr_err("Failed to register with the device %s: %#x\n",
			       real_dev->name, rc);
			break;
		}
		bnxt_re_init_one(rdev);
		sch_work = true;
		break;

	case NETDEV_UNREGISTER:
		/* netdev notifier will call NETDEV_UNREGISTER again later since
		 * we are still holding the reference to the netdev
		 */
		if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
			goto exit;
		bnxt_re_ib_unreg(rdev, false);
		bnxt_re_remove_one(rdev);
		bnxt_re_dev_unreg(rdev);
		break;

	default:
		sch_work = true;
		break;
	}
	if (sch_work) {
		/* Allocate for the deferred task */
		re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
		if (re_work) {
			re_work->rdev = rdev;
			re_work->event = event;
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
			set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}

exit:
	return NOTIFY_DONE;
}
static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};
static int __init bnxt_re_mod_init(void)
{
	int rc = 0;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register to netdevice_notifier",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}
static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev;
	LIST_HEAD(to_be_deleted);

	mutex_lock(&bnxt_re_dev_lock);
	/* Free all adapter allocated resources */
	if (!list_empty(&bnxt_re_dev_list))
		list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
	mutex_unlock(&bnxt_re_dev_lock);

	list_for_each_entry(rdev, &to_be_deleted, list) {
		dev_info(rdev_to_dev(rdev), "Unregistering Device");
		bnxt_re_dev_stop(rdev);
		bnxt_re_ib_unreg(rdev, true);
		bnxt_re_remove_one(rdev);
		bnxt_re_dev_unreg(rdev);
	}
	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
}
module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);