/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
10 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/errno.h>
14 #include <linux/interrupt.h>
15 #include <linux/pci.h>
16 #include <linux/netdevice.h>
17 #include <linux/rtnetlink.h>
18 #include <linux/bitops.h>
19 #include <linux/irq.h>
20 #include <asm/byteorder.h>
21 #include <linux/bitmap.h>
27 static int bnxt_register_dev(struct bnxt_en_dev
*edev
, int ulp_id
,
28 struct bnxt_ulp_ops
*ulp_ops
, void *handle
)
30 struct net_device
*dev
= edev
->net
;
31 struct bnxt
*bp
= netdev_priv(dev
);
35 if (ulp_id
>= BNXT_MAX_ULP
)
38 ulp
= &edev
->ulp_tbl
[ulp_id
];
39 if (rcu_access_pointer(ulp
->ulp_ops
)) {
40 netdev_err(bp
->dev
, "ulp id %d already registered\n", ulp_id
);
43 if (ulp_id
== BNXT_ROCE_ULP
) {
44 unsigned int max_stat_ctxs
;
46 max_stat_ctxs
= bnxt_get_max_func_stat_ctxs(bp
);
47 if (max_stat_ctxs
<= BNXT_MIN_ROCE_STAT_CTXS
||
48 bp
->cp_nr_rings
== max_stat_ctxs
)
52 atomic_set(&ulp
->ref_count
, 0);
54 rcu_assign_pointer(ulp
->ulp_ops
, ulp_ops
);
56 if (ulp_id
== BNXT_ROCE_ULP
) {
57 if (test_bit(BNXT_STATE_OPEN
, &bp
->state
))
58 bnxt_hwrm_vnic_cfg(bp
, 0);
64 static int bnxt_unregister_dev(struct bnxt_en_dev
*edev
, int ulp_id
)
66 struct net_device
*dev
= edev
->net
;
67 struct bnxt
*bp
= netdev_priv(dev
);
72 if (ulp_id
>= BNXT_MAX_ULP
)
75 ulp
= &edev
->ulp_tbl
[ulp_id
];
76 if (!rcu_access_pointer(ulp
->ulp_ops
)) {
77 netdev_err(bp
->dev
, "ulp id %d not registered\n", ulp_id
);
80 if (ulp_id
== BNXT_ROCE_ULP
&& ulp
->msix_requested
)
81 edev
->en_ops
->bnxt_free_msix(edev
, ulp_id
);
83 if (ulp
->max_async_event_id
)
84 bnxt_hwrm_func_rgtr_async_events(bp
, NULL
, 0);
86 RCU_INIT_POINTER(ulp
->ulp_ops
, NULL
);
88 ulp
->max_async_event_id
= 0;
89 ulp
->async_events_bmap
= NULL
;
90 while (atomic_read(&ulp
->ref_count
) != 0 && i
< 10) {
97 static void bnxt_fill_msix_vecs(struct bnxt
*bp
, struct bnxt_msix_entry
*ent
)
99 struct bnxt_en_dev
*edev
= bp
->edev
;
100 int num_msix
, idx
, i
;
102 num_msix
= edev
->ulp_tbl
[BNXT_ROCE_ULP
].msix_requested
;
103 idx
= edev
->ulp_tbl
[BNXT_ROCE_ULP
].msix_base
;
104 for (i
= 0; i
< num_msix
; i
++) {
105 ent
[i
].vector
= bp
->irq_tbl
[idx
+ i
].vector
;
106 ent
[i
].ring_idx
= idx
+ i
;
107 ent
[i
].db_offset
= (idx
+ i
) * 0x80;
111 static int bnxt_req_msix_vecs(struct bnxt_en_dev
*edev
, int ulp_id
,
112 struct bnxt_msix_entry
*ent
, int num_msix
)
114 struct net_device
*dev
= edev
->net
;
115 struct bnxt
*bp
= netdev_priv(dev
);
116 int max_idx
, max_cp_rings
;
121 if (ulp_id
!= BNXT_ROCE_ULP
)
124 if (!(bp
->flags
& BNXT_FLAG_USING_MSIX
))
127 if (edev
->ulp_tbl
[ulp_id
].msix_requested
)
130 max_cp_rings
= bnxt_get_max_func_cp_rings(bp
);
131 avail_msix
= bnxt_get_avail_msix(bp
, num_msix
);
134 if (avail_msix
> num_msix
)
135 avail_msix
= num_msix
;
137 if (BNXT_NEW_RM(bp
)) {
138 idx
= bp
->cp_nr_rings
;
140 max_idx
= min_t(int, bp
->total_irqs
, max_cp_rings
);
141 idx
= max_idx
- avail_msix
;
143 edev
->ulp_tbl
[ulp_id
].msix_base
= idx
;
144 edev
->ulp_tbl
[ulp_id
].msix_requested
= avail_msix
;
145 if (bp
->total_irqs
< (idx
+ avail_msix
)) {
146 if (netif_running(dev
)) {
147 bnxt_close_nic(bp
, true, false);
148 rc
= bnxt_open_nic(bp
, true, false);
150 rc
= bnxt_reserve_rings(bp
, true);
154 edev
->ulp_tbl
[ulp_id
].msix_requested
= 0;
158 if (BNXT_NEW_RM(bp
)) {
159 struct bnxt_hw_resc
*hw_resc
= &bp
->hw_resc
;
162 resv_msix
= hw_resc
->resv_irqs
- bp
->cp_nr_rings
;
163 avail_msix
= min_t(int, resv_msix
, avail_msix
);
164 edev
->ulp_tbl
[ulp_id
].msix_requested
= avail_msix
;
166 bnxt_fill_msix_vecs(bp
, ent
);
167 edev
->flags
|= BNXT_EN_FLAG_MSIX_REQUESTED
;
171 static int bnxt_free_msix_vecs(struct bnxt_en_dev
*edev
, int ulp_id
)
173 struct net_device
*dev
= edev
->net
;
174 struct bnxt
*bp
= netdev_priv(dev
);
177 if (ulp_id
!= BNXT_ROCE_ULP
)
180 if (!(edev
->flags
& BNXT_EN_FLAG_MSIX_REQUESTED
))
183 edev
->ulp_tbl
[ulp_id
].msix_requested
= 0;
184 edev
->flags
&= ~BNXT_EN_FLAG_MSIX_REQUESTED
;
185 if (netif_running(dev
) && !(edev
->flags
& BNXT_EN_FLAG_ULP_STOPPED
)) {
186 bnxt_close_nic(bp
, true, false);
187 bnxt_open_nic(bp
, true, false);
192 int bnxt_get_ulp_msix_num(struct bnxt
*bp
)
194 if (bnxt_ulp_registered(bp
->edev
, BNXT_ROCE_ULP
)) {
195 struct bnxt_en_dev
*edev
= bp
->edev
;
197 return edev
->ulp_tbl
[BNXT_ROCE_ULP
].msix_requested
;
202 int bnxt_get_ulp_msix_base(struct bnxt
*bp
)
204 if (bnxt_ulp_registered(bp
->edev
, BNXT_ROCE_ULP
)) {
205 struct bnxt_en_dev
*edev
= bp
->edev
;
207 if (edev
->ulp_tbl
[BNXT_ROCE_ULP
].msix_requested
)
208 return edev
->ulp_tbl
[BNXT_ROCE_ULP
].msix_base
;
213 int bnxt_get_ulp_stat_ctxs(struct bnxt
*bp
)
215 if (bnxt_ulp_registered(bp
->edev
, BNXT_ROCE_ULP
))
216 return BNXT_MIN_ROCE_STAT_CTXS
;
221 static int bnxt_send_msg(struct bnxt_en_dev
*edev
, int ulp_id
,
222 struct bnxt_fw_msg
*fw_msg
)
224 struct net_device
*dev
= edev
->net
;
225 struct bnxt
*bp
= netdev_priv(dev
);
229 if (ulp_id
!= BNXT_ROCE_ULP
&& bp
->fw_reset_state
)
232 mutex_lock(&bp
->hwrm_cmd_lock
);
234 req
->resp_addr
= cpu_to_le64(bp
->hwrm_cmd_resp_dma_addr
);
235 rc
= _hwrm_send_message(bp
, fw_msg
->msg
, fw_msg
->msg_len
,
238 struct output
*resp
= bp
->hwrm_cmd_resp_addr
;
239 u32 len
= le16_to_cpu(resp
->resp_len
);
241 if (fw_msg
->resp_max_len
< len
)
242 len
= fw_msg
->resp_max_len
;
244 memcpy(fw_msg
->resp
, resp
, len
);
246 mutex_unlock(&bp
->hwrm_cmd_lock
);
250 static void bnxt_ulp_get(struct bnxt_ulp
*ulp
)
252 atomic_inc(&ulp
->ref_count
);
255 static void bnxt_ulp_put(struct bnxt_ulp
*ulp
)
257 atomic_dec(&ulp
->ref_count
);
260 void bnxt_ulp_stop(struct bnxt
*bp
)
262 struct bnxt_en_dev
*edev
= bp
->edev
;
263 struct bnxt_ulp_ops
*ops
;
269 edev
->flags
|= BNXT_EN_FLAG_ULP_STOPPED
;
270 for (i
= 0; i
< BNXT_MAX_ULP
; i
++) {
271 struct bnxt_ulp
*ulp
= &edev
->ulp_tbl
[i
];
273 ops
= rtnl_dereference(ulp
->ulp_ops
);
274 if (!ops
|| !ops
->ulp_stop
)
276 ops
->ulp_stop(ulp
->handle
);
280 void bnxt_ulp_start(struct bnxt
*bp
, int err
)
282 struct bnxt_en_dev
*edev
= bp
->edev
;
283 struct bnxt_ulp_ops
*ops
;
289 edev
->flags
&= ~BNXT_EN_FLAG_ULP_STOPPED
;
294 for (i
= 0; i
< BNXT_MAX_ULP
; i
++) {
295 struct bnxt_ulp
*ulp
= &edev
->ulp_tbl
[i
];
297 ops
= rtnl_dereference(ulp
->ulp_ops
);
298 if (!ops
|| !ops
->ulp_start
)
300 ops
->ulp_start(ulp
->handle
);
304 void bnxt_ulp_sriov_cfg(struct bnxt
*bp
, int num_vfs
)
306 struct bnxt_en_dev
*edev
= bp
->edev
;
307 struct bnxt_ulp_ops
*ops
;
313 for (i
= 0; i
< BNXT_MAX_ULP
; i
++) {
314 struct bnxt_ulp
*ulp
= &edev
->ulp_tbl
[i
];
317 ops
= rcu_dereference(ulp
->ulp_ops
);
318 if (!ops
|| !ops
->ulp_sriov_config
) {
324 ops
->ulp_sriov_config(ulp
->handle
, num_vfs
);
329 void bnxt_ulp_shutdown(struct bnxt
*bp
)
331 struct bnxt_en_dev
*edev
= bp
->edev
;
332 struct bnxt_ulp_ops
*ops
;
338 for (i
= 0; i
< BNXT_MAX_ULP
; i
++) {
339 struct bnxt_ulp
*ulp
= &edev
->ulp_tbl
[i
];
341 ops
= rtnl_dereference(ulp
->ulp_ops
);
342 if (!ops
|| !ops
->ulp_shutdown
)
344 ops
->ulp_shutdown(ulp
->handle
);
348 void bnxt_ulp_irq_stop(struct bnxt
*bp
)
350 struct bnxt_en_dev
*edev
= bp
->edev
;
351 struct bnxt_ulp_ops
*ops
;
353 if (!edev
|| !(edev
->flags
& BNXT_EN_FLAG_MSIX_REQUESTED
))
356 if (bnxt_ulp_registered(bp
->edev
, BNXT_ROCE_ULP
)) {
357 struct bnxt_ulp
*ulp
= &edev
->ulp_tbl
[BNXT_ROCE_ULP
];
359 if (!ulp
->msix_requested
)
362 ops
= rtnl_dereference(ulp
->ulp_ops
);
363 if (!ops
|| !ops
->ulp_irq_stop
)
365 ops
->ulp_irq_stop(ulp
->handle
);
369 void bnxt_ulp_irq_restart(struct bnxt
*bp
, int err
)
371 struct bnxt_en_dev
*edev
= bp
->edev
;
372 struct bnxt_ulp_ops
*ops
;
374 if (!edev
|| !(edev
->flags
& BNXT_EN_FLAG_MSIX_REQUESTED
))
377 if (bnxt_ulp_registered(bp
->edev
, BNXT_ROCE_ULP
)) {
378 struct bnxt_ulp
*ulp
= &edev
->ulp_tbl
[BNXT_ROCE_ULP
];
379 struct bnxt_msix_entry
*ent
= NULL
;
381 if (!ulp
->msix_requested
)
384 ops
= rtnl_dereference(ulp
->ulp_ops
);
385 if (!ops
|| !ops
->ulp_irq_restart
)
389 ent
= kcalloc(ulp
->msix_requested
, sizeof(*ent
),
393 bnxt_fill_msix_vecs(bp
, ent
);
395 ops
->ulp_irq_restart(ulp
->handle
, ent
);
400 void bnxt_ulp_async_events(struct bnxt
*bp
, struct hwrm_async_event_cmpl
*cmpl
)
402 u16 event_id
= le16_to_cpu(cmpl
->event_id
);
403 struct bnxt_en_dev
*edev
= bp
->edev
;
404 struct bnxt_ulp_ops
*ops
;
411 for (i
= 0; i
< BNXT_MAX_ULP
; i
++) {
412 struct bnxt_ulp
*ulp
= &edev
->ulp_tbl
[i
];
414 ops
= rcu_dereference(ulp
->ulp_ops
);
415 if (!ops
|| !ops
->ulp_async_notifier
)
417 if (!ulp
->async_events_bmap
||
418 event_id
> ulp
->max_async_event_id
)
421 /* Read max_async_event_id first before testing the bitmap. */
423 if (test_bit(event_id
, ulp
->async_events_bmap
))
424 ops
->ulp_async_notifier(ulp
->handle
, cmpl
);
429 static int bnxt_register_async_events(struct bnxt_en_dev
*edev
, int ulp_id
,
430 unsigned long *events_bmap
, u16 max_id
)
432 struct net_device
*dev
= edev
->net
;
433 struct bnxt
*bp
= netdev_priv(dev
);
434 struct bnxt_ulp
*ulp
;
436 if (ulp_id
>= BNXT_MAX_ULP
)
439 ulp
= &edev
->ulp_tbl
[ulp_id
];
440 ulp
->async_events_bmap
= events_bmap
;
441 /* Make sure bnxt_ulp_async_events() sees this order */
443 ulp
->max_async_event_id
= max_id
;
444 bnxt_hwrm_func_rgtr_async_events(bp
, events_bmap
, max_id
+ 1);
448 static const struct bnxt_en_ops bnxt_en_ops_tbl
= {
449 .bnxt_register_device
= bnxt_register_dev
,
450 .bnxt_unregister_device
= bnxt_unregister_dev
,
451 .bnxt_request_msix
= bnxt_req_msix_vecs
,
452 .bnxt_free_msix
= bnxt_free_msix_vecs
,
453 .bnxt_send_fw_msg
= bnxt_send_msg
,
454 .bnxt_register_fw_async_events
= bnxt_register_async_events
,
457 struct bnxt_en_dev
*bnxt_ulp_probe(struct net_device
*dev
)
459 struct bnxt
*bp
= netdev_priv(dev
);
460 struct bnxt_en_dev
*edev
;
464 edev
= kzalloc(sizeof(*edev
), GFP_KERNEL
);
466 return ERR_PTR(-ENOMEM
);
467 edev
->en_ops
= &bnxt_en_ops_tbl
;
468 if (bp
->flags
& BNXT_FLAG_ROCEV1_CAP
)
469 edev
->flags
|= BNXT_EN_FLAG_ROCEV1_CAP
;
470 if (bp
->flags
& BNXT_FLAG_ROCEV2_CAP
)
471 edev
->flags
|= BNXT_EN_FLAG_ROCEV2_CAP
;
473 edev
->pdev
= bp
->pdev
;