/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);
#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164
/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	{ "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	{ "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	{ "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	{ "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	{ "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	{ "Broadcom NetXtreme-E Ethernet Virtual Function" },
	{ "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
static const u16 bnxt_vf_req_snif[] = {
	HWRM_CFA_L2_FILTER_ALLOC,
};
static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};
static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}
#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)					\
		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)					\
		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)
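
/* Each completion ring doorbell write tells the chip how far the driver
 * has consumed the ring (RING_CMP(raw_cons)).  The REARM variant leaves
 * DB_IRQ_DIS clear and so re-enables the ring interrupt after NAPI
 * polling is done; BNXT_CP_DB acknowledges entries with the interrupt
 * still masked; the IRQ_DIS variant only masks the ring interrupt
 * without advancing the consumer index.
 */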
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
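
/* bnxt_lhint_arr[] maps the packet length, in 512-byte units, to the
 * TX BD length hint flags used by bnxt_start_xmit() below; every entry
 * at or beyond 2KB collapses to the 2048_AND_LARGER hint.
 */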
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = 0;
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void *pdata = tx_push_buf->data;
		void *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
			__iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
					 push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
					tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	writel(DB_KEY_TX | prod, txr->tx_doorbell);
	writel(DB_KEY_TX | prod, txr->tx_doorbell);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
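
/* bnxt_tx_int() walks the completed TX packets, unmapping the head and
 * each fragment (push-mode packets were copied inline and need no
 * unmap), then wakes the queue under the tx lock to close the race with
 * bnxt_start_xmit() stopping it.
 */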
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
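
/* RX buffer allocation helpers.  The mapping handed back to the caller
 * is advanced by bp->rx_dma_offset so the chip DMAs the frame past the
 * headroom that the stack (or XDP) expects in front of the packet data.
 */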
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	*mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir);
	if (dma_mapping_error(dev, *mapping)) {
		__free_page(page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset,
				  bp->rx_buf_use_size, bp->rx_dir);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}
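
/* The aggregation ring uses rx_agg_bmap to track which software slots
 * hold pages, because completions identify buffers by their opaque
 * index and can hand them back out of order relative to sw_prod.
 */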
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}
static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}
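
/* In page mode, the RX skb is built by copying only the header bytes
 * (the payload offset reported by the chip, or eth_get_headlen() when
 * that is zero) into the linear area while the rest of the frame stays
 * in the page fragment.
 */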
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	struct skb_frag_struct *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir);

	if (unlikely(!payload))
		payload = eth_get_headlen(data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	frag->page_offset += payload;
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 bp->rx_dir);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    u16 len, dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u32 *raw_cons, void *cmp)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
	}
	rxr->rx_next_cons = 0xffff;
}
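
/* TPA start: the completed buffer is parked in rx_tpa[agg_id] for the
 * duration of the aggregation, and the TPA slot's previous buffer is
 * recycled into the RX ring at the producer index so the ring stays
 * fully populated.
 */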
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}

	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}
static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u16 cp_cons, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}
#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}
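
/* bnxt_gro_skb() prepares an aggregated packet for tcp_gro_complete():
 * it fills in gso_size/gso_type, locates the transport header from the
 * chip-reported payload offset via the chip-specific gro_func above,
 * and seeds th->check with the pseudo-header checksum.
 */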
static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);

	return skb;
}
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_napi *bnapi,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data_ptr, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	void *data;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*event |= BNXT_AGG_EVENT;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
			    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
				 bp->rx_dir);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}
	skb->protocol = eth_type_trans(skb, bp->dev);

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}
/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
		       u8 *event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data_ptr, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *data;
	int rc = 0;
	u32 misc;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		if (unlikely(IS_ERR(skb)))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			skb_record_rx_queue(skb, bnapi->index);
			napi_gro_receive(&bnapi->napi, skb);
			rc = 1;
		}
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);

		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	}
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
		rc = 1;
		goto next_rx;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		u32 payload;

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		else
			payload = 0;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
				      payload | len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	skb->protocol = eth_type_trans(skb, dev);

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				cpr->rx_l4_csum_errors++;
		}
	}

	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod:
	*raw_cons = tmp_raw_cons;

	return rc;
}
#define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;
		if (data1 & 0x20000) {
			u16 fw_speed = link_info->force_link_speed;
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			netdev_warn(bp->dev, "Link speed %d no longer supported\n",
				    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
		/* fall thru */
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	default:
		goto async_event_process_exit;
	}
	schedule_work(&bp->sp_task);
async_event_process_exit:
	bnxt_ulp_async_events(bp, cmpl);
	return 0;
}
static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
				(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);
		break;

	default:
		break;
	}

	return 0;
}
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}
static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	u32 raw_cons = cpr->cp_raw_cons;
	u16 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp;

	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	return TX_CMP_VALID(txcmp, raw_cons);
}
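
/* Legacy INTA handler: because the interrupt line may be shared, the
 * CAG legacy status register is consulted to reject interrupts that do
 * not belong to this ring before masking the ring IRQ and scheduling
 * NAPI.
 */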
static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);
	u32 int_status;

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

	if (!bnxt_has_work(bp, cpr)) {
		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* return if erroneous interrupt */
		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
			return IRQ_NONE;
	}

	/* disable ring IRQ */
	BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 cons;
	int tx_pkts = 0;
	int rx_pkts = 0;
	u8 event = 0;
	struct tx_cmp *txcmp;

	while (1) {
		int rc;

		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
			tx_pkts++;
			/* return full budget so NAPI will complete. */
			if (unlikely(tx_pkts > bp->tx_wake_thresh))
				rx_pkts = budget;
		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
			if (likely(rc >= 0))
				rx_pkts += rc;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_DONE) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
			bnxt_hwrm_handler(bp, txcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	if (event & BNXT_TX_EVENT) {
		struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
		void __iomem *db = txr->tx_doorbell;
		u16 prod = txr->tx_prod;

		/* Sync BD data before updating doorbell */
		wmb();

		writel(DB_KEY_TX | prod, db);
		writel(DB_KEY_TX | prod, db);
	}

	cpr->cp_raw_cons = raw_cons;
	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);

	if (tx_pkts)
		bnapi->tx_int(bp, bnapi, tx_pkts);

	if (event & BNXT_RX_EVENT) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		if (event & BNXT_AGG_EVENT) {
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
		}
	}
	return rx_pkts;
}
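
/* The Nitro A0 special ring never delivers packets to the stack: RX
 * completions are stamped with a CRC error so that bnxt_rx_pkt()
 * recycles the buffers, and anything other than HWRM_DONE on this ring
 * is unexpected.
 */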
static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct tx_cmp *txcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 cp_cons, tmp_raw_cons;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 rx_pkts = 0;
	u8 event = 0;

	while (1) {
		int rc;

		cp_cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
			cp_cons = RING_CMP(tmp_raw_cons);
			rxcmp1 = (struct rx_cmp_ext *)
			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
				break;

			/* force an error to recycle the buffer */
			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);

			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
			if (likely(rc == -EIO))
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
				    CMPL_BASE_TYPE_HWRM_DONE)) {
			bnxt_hwrm_handler(bp, txcmp);
		} else {
			netdev_err(bp->dev,
				   "Invalid completion received on special ring\n");
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
	writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
	writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);

	if (event & BNXT_AGG_EVENT) {
		writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
		writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
	}

	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
		napi_complete_done(napi, rx_pkts);
		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
	return rx_pkts;
}
static int bnxt_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int work_done = 0;

	while (1) {
		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);

		if (work_done >= budget)
			break;

		if (!bnxt_has_work(bp, cpr)) {
			if (napi_complete_done(napi, work_done))
				BNXT_CP_DB_REARM(cpr->cp_doorbell,
						 cpr->cp_raw_cons);
			break;
		}
	}
	return work_done;
}
static void bnxt_free_tx_skbs(struct bnxt *bp)
{
	int i, max_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		int j;

		for (j = 0; j < max_idx;) {
			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (!skb) {
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			if (tx_buf->is_push) {
				dev_kfree_skb(skb);
				j += 2;
				continue;
			}

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			last = tx_buf->nr_frags;
			j += 2;
			for (k = 0; k < last; k++, j++) {
				int ring_idx = j & bp->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(
					&pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
static void bnxt_free_rx_skbs(struct bnxt *bp)
{
	int i, max_idx, max_agg_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->rx_ring)
		return;

	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		int j;

		if (rxr->rx_tpa) {
			for (j = 0; j < MAX_TPA; j++) {
				struct bnxt_tpa_info *tpa_info =
							&rxr->rx_tpa[j];
				u8 *data = tpa_info->data;

				if (!data)
					continue;

				dma_unmap_single(&pdev->dev, tpa_info->mapping,
						 bp->rx_buf_use_size,
						 bp->rx_dir);

				tpa_info->data = NULL;

				kfree(data);
			}
		}

		for (j = 0; j < max_idx; j++) {
			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
			void *data = rx_buf->data;

			if (!data)
				continue;

			dma_unmap_single(&pdev->dev, rx_buf->mapping,
					 bp->rx_buf_use_size, bp->rx_dir);

			rx_buf->data = NULL;

			if (BNXT_RX_PAGE_MODE(bp))
				__free_page(data);
			else
				kfree(data);
		}

		for (j = 0; j < max_agg_idx; j++) {
			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
						&rxr->rx_agg_ring[j];
			struct page *page = rx_agg_buf->page;

			if (!page)
				continue;

			dma_unmap_page(&pdev->dev, rx_agg_buf->mapping,
				       BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);

			rx_agg_buf->page = NULL;
			__clear_bit(j, rxr->rx_agg_bmap);

			__free_page(page);
		}
		if (rxr->rx_page) {
			__free_page(rxr->rx_page);
			rxr->rx_page = NULL;
		}
	}
}
static void bnxt_free_skbs(struct bnxt *bp)
{
	bnxt_free_tx_skbs(bp);
	bnxt_free_rx_skbs(bp);
}
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	for (i = 0; i < ring->nr_pages; i++) {
		if (!ring->pg_arr[i])
			continue;

		dma_free_coherent(&pdev->dev, ring->page_size,
				  ring->pg_arr[i], ring->dma_arr[i]);

		ring->pg_arr[i] = NULL;
	}
	if (ring->pg_tbl) {
		dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
				  ring->pg_tbl, ring->pg_tbl_map);
		ring->pg_tbl = NULL;
	}
	if (ring->vmem_size && *ring->vmem) {
		vfree(*ring->vmem);
		*ring->vmem = NULL;
	}
}
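
/* Rings larger than one page carry a page table (pg_tbl) with one
 * 64-bit DMA address per ring page, giving the hardware the indirection
 * it needs to chain the pages into one logical ring.
 */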
static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (ring->nr_pages > 1) {
		ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
						  ring->nr_pages * 8,
						  &ring->pg_tbl_map,
						  GFP_KERNEL);
		if (!ring->pg_tbl)
			return -ENOMEM;
	}

	for (i = 0; i < ring->nr_pages; i++) {
		ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     ring->page_size,
						     &ring->dma_arr[i],
						     GFP_KERNEL);
		if (!ring->pg_arr[i])
			return -ENOMEM;

		if (ring->nr_pages > 1)
			ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
	}

	if (ring->vmem_size) {
		*ring->vmem = vzalloc(ring->vmem_size);
		if (!(*ring->vmem))
			return -ENOMEM;
	}
	return 0;
}
static void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;

	if (!bp->rx_ring)
		return;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		if (rxr->xdp_prog)
			bpf_prog_put(rxr->xdp_prog);

		kfree(rxr->rx_tpa);
		rxr->rx_tpa = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnxt_free_ring(bp, ring);

		ring = &rxr->rx_agg_ring_struct;
		bnxt_free_ring(bp, ring);
	}
}
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
	int i, rc, agg_rings = 0, tpa_rings = 0;

	if (!bp->rx_ring)
		return -ENOMEM;

	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		agg_rings = 1;

	if (bp->flags & BNXT_FLAG_TPA)
		tpa_rings = 1;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &rxr->rx_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;

		if (agg_rings) {
			u16 mem_size;

			ring = &rxr->rx_agg_ring_struct;
			rc = bnxt_alloc_ring(bp, ring);
			if (rc)
				return rc;

			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
			mem_size = rxr->rx_agg_bmap_size / 8;
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
			if (!rxr->rx_agg_bmap)
				return -ENOMEM;

			if (tpa_rings) {
				rxr->rx_tpa = kcalloc(MAX_TPA,
						sizeof(struct bnxt_tpa_info),
						GFP_KERNEL);
				if (!rxr->rx_tpa)
					return -ENOMEM;
			}
		}
	}
	return 0;
}
static void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		if (txr->tx_push) {
			dma_free_coherent(&pdev->dev, bp->tx_push_size,
					  txr->tx_push, txr->tx_push_mapping);
			txr->tx_push = NULL;
		}

		ring = &txr->tx_ring_struct;

		bnxt_free_ring(bp, ring);
	}
}
static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
	int i, j, rc;
	struct pci_dev *pdev = bp->pdev;

	bp->tx_push_size = 0;
	if (bp->tx_push_thresh) {
		int push_size;

		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
					bp->tx_push_thresh);

		if (push_size > 256) {
			push_size = 0;
			bp->tx_push_thresh = 0;
		}

		bp->tx_push_size = push_size;
	}

	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &txr->tx_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;

		if (bp->tx_push_size) {
			dma_addr_t mapping;

			/* One pre-allocated DMA buffer to backup
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
						bp->tx_push_size,
						&txr->tx_push_mapping,
						GFP_KERNEL);

			if (!txr->tx_push)
				return -ENOMEM;

			mapping = txr->tx_push_mapping +
				sizeof(struct tx_push_bd);
			txr->data_mapping = cpu_to_le64(mapping);

			memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
		}
		ring->queue_id = bp->q_info[j].queue_id;
		if (i < bp->tx_nr_rings_xdp)
			continue;
		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;
}
static void bnxt_free_cp_rings(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		bnxt_free_ring(bp, ring);
	}
}
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
	int i, rc;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;
	}
	return 0;
}
static void bnxt_init_ring_struct(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;
		ring->nr_pages = bp->cp_nr_pages;
		ring->page_size = HW_CMPD_RING_SIZE;
		ring->pg_arr = (void **)cpr->cp_desc_ring;
		ring->dma_arr = cpr->cp_desc_mapping;
		ring->vmem_size = 0;

		rxr = bnapi->rx_ring;
		if (!rxr)
			goto skip_rx;

		ring = &rxr->rx_ring_struct;
		ring->nr_pages = bp->rx_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)rxr->rx_desc_ring;
		ring->dma_arr = rxr->rx_desc_mapping;
		ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
		ring->vmem = (void **)&rxr->rx_buf_ring;

		ring = &rxr->rx_agg_ring_struct;
		ring->nr_pages = bp->rx_agg_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
		ring->dma_arr = rxr->rx_agg_desc_mapping;
		ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
		ring->vmem = (void **)&rxr->rx_agg_ring;

skip_rx:
		txr = bnapi->tx_ring;
		if (!txr)
			continue;

		ring = &txr->tx_ring_struct;
		ring->nr_pages = bp->tx_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)txr->tx_desc_ring;
		ring->dma_arr = txr->tx_desc_mapping;
		ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
		ring->vmem = (void **)&txr->tx_buf_ring;
	}
}
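/* Note: bnxt_ring_struct is the generic descriptor-ring view consumed by
 * bnxt_alloc_ring()/bnxt_free_ring(): pg_arr/dma_arr point at the
 * per-page descriptor memory and its DMA addresses, while vmem/vmem_size
 * describe the optional software buffer-tracking array that shadows the
 * hardware ring.  The completion ring sets vmem_size to 0 because it has
 * no software buffer array to allocate.
 */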
static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
{
	int i;
	u32 prod;
	struct rx_bd **rx_buf_ring;

	rx_buf_ring = (struct rx_bd **)ring->pg_arr;
	for (i = 0, prod = 0; i < ring->nr_pages; i++) {
		int j;
		struct rx_bd *rxbd;

		rxbd = rx_buf_ring[i];
		if (!rxbd)
			continue;

		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
			rxbd->rx_bd_opaque = prod;
		}
	}
}
static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct net_device *dev = bp->dev;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring_struct *ring;
	u32 prod, type;
	int i;

	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	rxr = &bp->rx_ring[ring_nr];
	ring = &rxr->rx_ring_struct;
	bnxt_init_rxbd_pages(ring, type);

	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
		rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
		if (IS_ERR(rxr->xdp_prog)) {
			int rc = PTR_ERR(rxr->xdp_prog);

			rxr->xdp_prog = NULL;
			return rc;
		}
	}
	prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX(prod);
	}
	rxr->rx_prod = prod;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

	bnxt_init_rxbd_pages(ring, type);

	prod = rxr->rx_agg_prod;
	for (i = 0; i < bp->rx_agg_ring_size; i++) {
		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;

	if (bp->flags & BNXT_FLAG_TPA) {
		if (rxr->rx_tpa) {
			u8 *data;
			dma_addr_t mapping;

			for (i = 0; i < MAX_TPA; i++) {
				data = __bnxt_alloc_rx_data(bp, &mapping,
							    GFP_KERNEL);
				if (!data)
					return -ENOMEM;

				rxr->rx_tpa[i].data = data;
				rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
				rxr->rx_tpa[i].mapping = mapping;
			}
		} else {
			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
			return -ENOMEM;
		}
	}

	return 0;
}
static int bnxt_init_rx_rings(struct bnxt *bp)
{
	int i, rc = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
	} else {
		bp->rx_offset = BNXT_RX_OFFSET;
		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		rc = bnxt_init_one_rx_ring(bp, i);
		if (rc)
			break;
	}

	return rc;
}
static int bnxt_init_tx_rings(struct bnxt *bp)
{
	u16 i;

	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
				   MAX_SKB_FRAGS + 1);

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	return 0;
}
static void bnxt_free_ring_grps(struct bnxt *bp)
{
	kfree(bp->grp_info);
	bp->grp_info = NULL;
}
static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
{
	int i;

	if (irq_re_init) {
		bp->grp_info = kcalloc(bp->cp_nr_rings,
				       sizeof(struct bnxt_ring_grp_info),
				       GFP_KERNEL);
		if (!bp->grp_info)
			return -ENOMEM;
	}
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (irq_re_init)
			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
	return 0;
}
static void bnxt_free_vnics(struct bnxt *bp)
{
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;
}
static int bnxt_alloc_vnics(struct bnxt *bp)
{
	int num_vnics = 1;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
		num_vnics += bp->rx_nr_rings;
#endif

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		num_vnics++;

	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
				GFP_KERNEL);
	if (!bp->vnic_info)
		return -ENOMEM;

	bp->nr_vnics = num_vnics;
	return 0;
}
static void bnxt_init_vnics(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
		vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;

		if (bp->vnic_info[i].rss_hash_key) {
			if (i == 0)
				prandom_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			else
				memcpy(vnic->rss_hash_key,
				       bp->vnic_info[0].rss_hash_key,
				       HW_HASH_KEY_SIZE);
		}
	}
}
static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
{
	int pages;

	pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;

	pages++;

	while (pages & (pages - 1))
		pages++;

	return pages;
}
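/* Worked example (illustrative): with 4K pages and 16-byte rx_bd
 * descriptors, RX_DESC_CNT is 256.  A requested ring_size of 1023 gives
 * pages = 1023 / 256 = 3, incremented to 4, which is already a power of
 * 2, so the ring uses 4 descriptor pages.  Ring sizes are normally chosen
 * as (n * DESC_CNT - 1) so the power-of-2 rounding loop does not inflate
 * the page count.
 */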
void bnxt_set_tpa_flags(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_TPA;
	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
		return;
	if (bp->dev->features & NETIF_F_LRO)
		bp->flags |= BNXT_FLAG_LRO;
	if (bp->dev->features & NETIF_F_GRO)
		bp->flags |= BNXT_FLAG_GRO;
}
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
 * be set on entry.
 */
void bnxt_set_ring_params(struct bnxt *bp)
{
	u32 ring_size, rx_size, rx_space;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	rx_space = rx_size + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
	ring_size = bp->rx_ring_size;
	bp->rx_agg_ring_size = 0;
	bp->rx_agg_nr_pages = 0;

	if (bp->flags & BNXT_FLAG_TPA)
		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);

	bp->flags &= ~BNXT_FLAG_JUMBO;
	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
		u32 jumbo_factor;

		bp->flags |= BNXT_FLAG_JUMBO;
		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	agg_ring_size = ring_size * agg_factor;

	if (agg_ring_size) {
		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
							RX_DESC_CNT);
		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bp->rx_agg_ring_size = agg_ring_size;
		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
		rx_space = rx_size + NET_SKB_PAD +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	bp->rx_buf_use_size = rx_size;
	bp->rx_buf_size = rx_space;

	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bp->tx_ring_size;
	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;

	ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
	bp->cp_ring_size = ring_size;

	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
	if (bp->cp_nr_pages > MAX_CP_PAGES) {
		bp->cp_nr_pages = MAX_CP_PAGES;
		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
			    ring_size, bp->cp_ring_size);
	}
	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
	bp->cp_ring_mask = bp->cp_bit - 1;
}
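/* Worked example (illustrative, assuming a 1500-byte MTU, 4K pages and
 * NET_IP_ALIGN == 2): rx_size = SKB_DATA_ALIGN(1500 + 14 + 2 + 8), and
 * rx_space adds NET_SKB_PAD plus the aligned skb_shared_info, which still
 * fits in one page, so no jumbo sizing is forced.  The completion ring is
 * sized to rx * (2 + agg_factor) + tx because each received packet can
 * consume up to two completion records plus one per aggregation buffer.
 */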
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
	if (page_mode) {
		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
			return -EOPNOTSUPP;
		bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
		bp->dev->hw_features &= ~NETIF_F_LRO;
		bp->dev->features &= ~NETIF_F_LRO;
		bp->rx_dir = DMA_BIDIRECTIONAL;
		bp->rx_skb_func = bnxt_rx_page_skb;
	} else {
		bp->dev->max_mtu = BNXT_MAX_MTU;
		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
		bp->rx_dir = DMA_FROM_DEVICE;
		bp->rx_skb_func = bnxt_rx_skb;
	}
	return 0;
}
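/* Note: in page mode (used for XDP) RX buffers are mapped
 * DMA_BIDIRECTIONAL so an attached program can rewrite the frame and the
 * driver can transmit it back out (XDP_TX) without remapping; normal SKB
 * mode only ever DMAs from the device, so DMA_FROM_DEVICE suffices there.
 */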
static void bnxt_free_vnic_attributes(struct bnxt *bp)
{
	int i;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->vnic_info)
		return;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		kfree(vnic->fw_grp_ids);
		vnic->fw_grp_ids = NULL;

		kfree(vnic->uc_list);
		vnic->uc_list = NULL;

		if (vnic->mc_list) {
			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
					  vnic->mc_list, vnic->mc_list_mapping);
			vnic->mc_list = NULL;
		}

		if (vnic->rss_table) {
			dma_free_coherent(&pdev->dev, PAGE_SIZE,
					  vnic->rss_table,
					  vnic->rss_table_dma_addr);
			vnic->rss_table = NULL;
		}

		vnic->rss_hash_key = NULL;
		vnic->flags = 0;
	}
}
static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
{
	int i, rc = 0, size;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;
	int max_rings;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;

			if (mem_size > 0) {
				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
				if (!vnic->uc_list) {
					rc = -ENOMEM;
					goto out;
				}
			}
		}

		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(&pdev->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list) {
				rc = -ENOMEM;
				goto out;
			}
		}

		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
			max_rings = bp->rx_nr_rings;
		else
			max_rings = 1;

		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
		if (!vnic->fw_grp_ids) {
			rc = -ENOMEM;
			goto out;
		}

		if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
			continue;

		/* Allocate rss table and hash key */
		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table) {
			rc = -ENOMEM;
			goto out;
		}

		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));

		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	}
	return 0;

out:
	return rc;
}
static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
			  bp->hwrm_cmd_resp_dma_addr);

	bp->hwrm_cmd_resp_addr = NULL;
	if (bp->hwrm_dbg_resp_addr) {
		dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
				  bp->hwrm_dbg_resp_addr,
				  bp->hwrm_dbg_resp_dma_addr);

		bp->hwrm_dbg_resp_addr = NULL;
	}
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						   &bp->hwrm_cmd_resp_dma_addr,
						   GFP_KERNEL);
	if (!bp->hwrm_cmd_resp_addr)
		return -ENOMEM;
	bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
						    HWRM_DBG_REG_BUF_SIZE,
						    &bp->hwrm_dbg_resp_dma_addr,
						    GFP_KERNEL);
	if (!bp->hwrm_dbg_resp_addr)
		netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");

	return 0;
}
static void bnxt_free_stats(struct bnxt *bp)
{
	u32 size, i;
	struct pci_dev *pdev = bp->pdev;

	if (bp->hw_rx_port_stats) {
		dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
				  bp->hw_rx_port_stats,
				  bp->hw_rx_port_stats_map);
		bp->hw_rx_port_stats = NULL;
		bp->flags &= ~BNXT_FLAG_PORT_STATS;
	}

	if (!bp->bnapi)
		return;

	size = sizeof(struct ctx_hw_stats);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats) {
			dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
					  cpr->hw_stats_map);
			cpr->hw_stats = NULL;
		}
	}
}
static int bnxt_alloc_stats(struct bnxt *bp)
{
	u32 size, i;
	struct pci_dev *pdev = bp->pdev;

	size = sizeof(struct ctx_hw_stats);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
						   &cpr->hw_stats_map,
						   GFP_KERNEL);
		if (!cpr->hw_stats)
			return -ENOMEM;

		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
	}

	if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
		bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
					 sizeof(struct tx_port_stats) + 1024;

		bp->hw_rx_port_stats =
			dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
					   &bp->hw_rx_port_stats_map,
					   GFP_KERNEL);
		if (!bp->hw_rx_port_stats)
			return -ENOMEM;

		bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
				       512;
		bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
					   sizeof(struct rx_port_stats) + 512;
		bp->flags |= BNXT_FLAG_PORT_STATS;
	}
	return 0;
}
static void bnxt_clear_ring_indices(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->cp_raw_cons = 0;

		txr = bnapi->tx_ring;
		if (txr) {
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}

		rxr = bnapi->rx_ring;
		if (rxr) {
			rxr->rx_prod = 0;
			rxr->rx_agg_prod = 0;
			rxr->rx_sw_agg_prod = 0;
			rxr->rx_next_cons = 0;
		}
	}
}
static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
{
#ifdef CONFIG_RFS_ACCEL
	int i;

	/* Under rtnl_lock and all our NAPIs have been disabled. It's
	 * safe to delete the hash table.
	 */
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			hlist_del(&fltr->hash);
			kfree(fltr);
		}
	}
	if (irq_reinit) {
		kfree(bp->ntp_fltr_bmap);
		bp->ntp_fltr_bmap = NULL;
	}
	bp->ntp_fltr_count = 0;
#endif
}
static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	if (!(bp->flags & BNXT_FLAG_RFS))
		return 0;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);

	bp->ntp_fltr_count = 0;
	/* BITS_TO_LONGS() yields a count of longs, not bytes, so size the
	 * bitmap allocation in bytes to cover the whole filter range.
	 */
	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
				    sizeof(long), GFP_KERNEL);

	if (!bp->ntp_fltr_bmap)
		rc = -ENOMEM;

	return rc;
#else
	return 0;
#endif
}
static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
{
	bnxt_free_vnic_attributes(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_cp_rings(bp);
	bnxt_free_ntp_fltrs(bp, irq_re_init);
	if (irq_re_init) {
		bnxt_free_stats(bp);
		bnxt_free_ring_grps(bp);
		bnxt_free_vnics(bp);
		kfree(bp->tx_ring_map);
		bp->tx_ring_map = NULL;
		kfree(bp->tx_ring);
		bp->tx_ring = NULL;
		kfree(bp->rx_ring);
		bp->rx_ring = NULL;
		kfree(bp->bnapi);
		bp->bnapi = NULL;
	} else {
		bnxt_clear_ring_indices(bp);
	}
}
static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
{
	int i, j, rc, size, arr_size;
	void *bnapi;

	if (irq_re_init) {
		/* Allocate bnapi mem pointer array and mem block for
		 * all queues
		 */
		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
				bp->cp_nr_rings);
		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
		if (!bnapi)
			return -ENOMEM;

		bp->bnapi = bnapi;
		bnapi += arr_size;
		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
			bp->bnapi[i] = bnapi;
			bp->bnapi[i]->index = i;
			bp->bnapi[i]->bp = bp;
		}

		bp->rx_ring = kcalloc(bp->rx_nr_rings,
				      sizeof(struct bnxt_rx_ring_info),
				      GFP_KERNEL);
		if (!bp->rx_ring)
			return -ENOMEM;

		for (i = 0; i < bp->rx_nr_rings; i++) {
			bp->rx_ring[i].bnapi = bp->bnapi[i];
			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
		}

		bp->tx_ring = kcalloc(bp->tx_nr_rings,
				      sizeof(struct bnxt_tx_ring_info),
				      GFP_KERNEL);
		if (!bp->tx_ring)
			return -ENOMEM;

		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
					  GFP_KERNEL);

		if (!bp->tx_ring_map)
			return -ENOMEM;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			j = 0;
		else
			j = bp->rx_nr_rings;

		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
			bp->tx_ring[i].bnapi = bp->bnapi[j];
			bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
			if (i >= bp->tx_nr_rings_xdp) {
				bp->tx_ring[i].txq_index = i -
					bp->tx_nr_rings_xdp;
				bp->bnapi[j]->tx_int = bnxt_tx_int;
			} else {
				bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
				bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
			}
		}

		rc = bnxt_alloc_stats(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_ntp_fltrs(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_vnics(bp);
		if (rc)
			goto alloc_mem_err;
	}

	bnxt_init_ring_struct(bp);

	rc = bnxt_alloc_rx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_tx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_cp_rings(bp);
	if (rc)
		goto alloc_mem_err;

	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
				  BNXT_VNIC_UCAST_FLAG;
	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, true);
	return rc;
}
static void bnxt_disable_int(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID)
			BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
}
static void bnxt_disable_int_sync(struct bnxt *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);

	bnxt_disable_int(bp);
	for (i = 0; i < bp->cp_nr_rings; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
static void bnxt_enable_int(struct bnxt *bp)
{
	int i;

	atomic_set(&bp->intr_sem, 0);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
}
void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
			    u16 cmpl_ring, u16 target_id)
{
	struct input *req = request;

	req->req_type = cpu_to_le16(req_type);
	req->cmpl_ring = cpu_to_le16(cmpl_ring);
	req->target_id = cpu_to_le16(target_id);
	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
				 int timeout, bool silent)
{
	int i, intr_process, rc, tmo_count;
	struct input *req = msg;
	u32 *data = msg;
	__le32 *resp_len, *valid;
	u16 cp_ring_id, len = 0;
	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;

	req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
	memset(resp, 0, PAGE_SIZE);
	cp_ring_id = le16_to_cpu(req->cmpl_ring);
	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;

	/* Write request msg to hwrm channel */
	__iowrite32_copy(bp->bar0, data, msg_len / 4);

	for (i = msg_len; i < BNXT_HWRM_MAX_REQ_LEN; i += 4)
		writel(0, bp->bar0 + i);

	/* currently supports only one outstanding message */
	if (intr_process)
		bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);

	/* Ring channel doorbell */
	writel(1, bp->bar0 + 0x100);

	if (!timeout)
		timeout = DFLT_HWRM_CMD_TIMEOUT;

	i = 0;
	tmo_count = timeout * 40;
	if (intr_process) {
		/* Wait until hwrm response cmpl interrupt is processed */
		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
		       i++ < tmo_count) {
			usleep_range(25, 40);
		}

		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
			netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
				   le16_to_cpu(req->req_type));
			return -1;
		}
	} else {
		/* Check if response len is updated */
		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
		for (i = 0; i < tmo_count; i++) {
			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
			      HWRM_RESP_LEN_SFT;
			if (len)
				break;
			usleep_range(25, 40);
		}

		if (i >= tmo_count) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
				   timeout, le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len);
			return -1;
		}

		/* Last word of resp contains valid bit */
		valid = bp->hwrm_cmd_resp_addr + len - 4;
		for (i = 0; i < 5; i++) {
			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
				break;
			udelay(1);
		}

		if (i >= 5) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
				   timeout, le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len, *valid);
			return -1;
		}
	}

	rc = le16_to_cpu(resp->error_code);
	if (rc && !silent)
		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
			   le16_to_cpu(resp->req_type),
			   le16_to_cpu(resp->seq_id), rc);
	return rc;
}
int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}

int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
			     int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
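/* Usage note: hwrm_send_message() is the normal entry point and takes
 * hwrm_cmd_lock itself.  Callers that must read the shared response
 * buffer (bp->hwrm_cmd_resp_addr) after the call use the unlocked
 * _hwrm_send_message() variant and hold bp->hwrm_cmd_lock across both
 * the send and the response parsing, as several functions below do.
 */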
int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
				     int bmap_size)
{
	struct hwrm_func_drv_rgtr_input req = {0};
	DECLARE_BITMAP(async_events_bmap, 256);
	u32 *events = (u32 *)async_events_bmap;
	int i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

	req.enables =
		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	memset(async_events_bmap, 0, sizeof(async_events_bmap));
	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
		__set_bit(bnxt_async_events_arr[i], async_events_bmap);

	if (bmap && bmap_size) {
		for (i = 0; i < bmap_size; i++) {
			if (test_bit(i, bmap))
				__set_bit(i, async_events_bmap);
		}
	}

	for (i = 0; i < 8; i++)
		req.async_event_fwd[i] |= cpu_to_le32(events[i]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

	req.enables =
		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
			    FUNC_DRV_RGTR_REQ_ENABLES_VER);

	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
	req.ver_maj = DRV_VER_MAJ;
	req.ver_min = DRV_VER_MIN;
	req.ver_upd = DRV_VER_UPD;

	if (BNXT_PF(bp)) {
		DECLARE_BITMAP(vf_req_snif_bmap, 256);
		u32 *data = (u32 *)vf_req_snif_bmap;
		int i;

		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
			__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);

		for (i = 0; i < 8; i++)
			req.vf_req_fwd[i] = cpu_to_le32(data[i]);

		req.enables |=
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
	}

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
	req.tunnel_type = tunnel_type;

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
		break;
	default:
		break;
	}

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
			   rc);
	return rc;
}
static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
					   u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
			   rc);
		goto err_out;
	}

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	default:
		break;
	}

err_out:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
{
	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);

	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
	req.mask = cpu_to_le32(vnic->rx_mask);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
#ifdef CONFIG_RFS_ACCEL
static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
					    struct bnxt_ntuple_filter *fltr)
{
	struct hwrm_cfa_ntuple_filter_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
	req.ntuple_filter_id = fltr->filter_id;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
#define BNXT_NTP_FLTR_FLAGS					\
	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)

#define BNXT_NTP_TUNNEL_FLTR_FLAG				\
		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
					     struct bnxt_ntuple_filter *fltr)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct flow_keys *keys = &fltr->fkeys;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];

	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);

	req.ethertype = htons(ETH_P_IP);
	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = keys->basic.ip_proto;

	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
		int i;

		req.ethertype = htons(ETH_P_IPV6);
		req.ip_addr_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
		*(struct in6_addr *)&req.src_ipaddr[0] =
			keys->addrs.v6addrs.src;
		*(struct in6_addr *)&req.dst_ipaddr[0] =
			keys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
			req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
		}
	} else {
		req.src_ipaddr[0] = keys->addrs.v4addrs.src;
		req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
		req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
		req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
	}
	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
		req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
		req.tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
	}

	req.src_port = keys->ports.src;
	req.src_port_mask = cpu_to_be16(0xffff);
	req.dst_port = keys->ports.dst;
	req.dst_port_mask = cpu_to_be16(0xffff);

	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		fltr->filter_id = resp->ntuple_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
#endif
static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
				     u8 *mac_addr)
{
	u32 rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {0};
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		req.flags |=
			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
	req.enables =
		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
	req.l2_addr_mask[0] = 0xff;
	req.l2_addr_mask[1] = 0xff;
	req.l2_addr_mask[2] = 0xff;
	req.l2_addr_mask[3] = 0xff;
	req.l2_addr_mask[4] = 0xff;
	req.l2_addr_mask[5] = 0xff;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
							resp->l2_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
	int rc = 0;

	/* Any associated ntuple filters will also be cleared by firmware. */
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct hwrm_cfa_l2_filter_free_input req = {0};

			bnxt_hwrm_cmd_hdr_init(bp, &req,
					       HWRM_CFA_L2_FILTER_FREE, -1, -1);

			req.l2_filter_id = vnic->fw_l2_filter_id[j];

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
		}
		vnic->uc_filter_count = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_tpa_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req.flags = cpu_to_le32(flags);

		req.enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* Number of segs are log2 units, and first packet is not
		 * included as part of this units.
		 */
		if (mss <= BNXT_RX_PAGE_SIZE) {
			n = BNXT_RX_PAGE_SIZE / mss;
			nsegs = (MAX_SKB_FRAGS - 1) * n;
		} else {
			n = mss / BNXT_RX_PAGE_SIZE;
			if (mss & (BNXT_RX_PAGE_SIZE - 1))
				n++;
			nsegs = (MAX_SKB_FRAGS - n) / n;
		}

		segs = ilog2(nsegs);
		req.max_agg_segs = cpu_to_le16(segs);
		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);

		req.min_agg_len = cpu_to_le32(512);
	}
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
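/* Worked example for the max_agg_segs math (illustrative, assuming
 * MAX_SKB_FRAGS == 17, BNXT_RX_PAGE_SIZE == 4096 and a 1500-byte MTU):
 * mss = 1460 <= 4096, so n = 4096 / 1460 = 2 and
 * nsegs = (17 - 1) * 2 = 32, giving max_agg_segs = ilog2(32) = 5.
 */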
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	u32 i, j, max_rings;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_rss_cfg_input req = {0};

	if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	if (set_rss) {
		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
				max_rings = bp->rx_nr_rings - 1;
			else
				max_rings = bp->rx_nr_rings;
		} else {
			max_rings = 1;
		}

		/* Fill the RSS indirection table with ring group ids */
		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
			if (j == max_rings)
				j = 0;
			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
		}

		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
		req.hash_key_tbl_addr =
			cpu_to_le64(vnic->rss_hash_key_dma_addr);
	}
	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
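/* Note: the indirection table has HW_HASH_INDEX_SIZE entries; j walks the
 * firmware ring-group ids and wraps back to 0 at max_rings, so RX flows
 * are spread round-robin across all RSS rings regardless of the table
 * size.
 */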
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_plcmodes_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
	req.enables =
		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
	/* thresholds not implemented in firmware yet */
	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
					u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
	req.rss_cos_lb_ctx_id =
		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);

	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}
static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
		}
	}
	bp->rsscos_nr_ctxs = 0;
}
static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
{
	int rc;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
			       -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
	unsigned int ring = 0, grp_idx;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_cfg_input req = {0};
	u16 def_vlan = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);

	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS support for now TBD: COS & LB */
	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
		req.rss_rule =
			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
	} else {
		req.rss_rule = cpu_to_le16(0xffff);
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
	} else {
		req.cos_rule = cpu_to_le16(0xffff);
	}

	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
		ring = 0;
	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
		ring = vnic_id - 1;
	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
		ring = bp->rx_nr_rings - 1;

	grp_idx = bp->rx_ring[ring].bnapi->index;
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);

	req.lb_rule = cpu_to_le16(0xffff);
	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
			      VLAN_HLEN);

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp))
		def_vlan = bp->vf.vlan;
#endif
	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
		req.flags |=
			cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
	u32 rc = 0;

	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
		req.vnic_id =
			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);

		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return rc;
		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
	}
	return rc;
}
static void bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_free_one(bp, i);
}
static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
				unsigned int start_rx_ring_idx,
				unsigned int nr_rings)
{
	int rc = 0;
	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
	struct hwrm_vnic_alloc_input req = {0};
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
		grp_idx = bp->rx_ring[i].bnapi->index;
		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
				   j, nr_rings);
			break;
		}
		bp->vnic_info[vnic_id].fw_grp_ids[j] =
					bp->grp_info[grp_idx].fw_grp_id;
	}

	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
	if (vnic_id == 0)
		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_vnic_qcaps_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10600)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		if (resp->flags &
		    cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
	u16 i;
	u32 rc = 0;

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct hwrm_ring_grp_alloc_input req = {0};
		struct hwrm_ring_grp_alloc_output *resp =
					bp->hwrm_cmd_resp_addr;
		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);

		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		bp->grp_info[grp_idx].fw_grp_id =
			le32_to_cpu(resp->ring_group_id);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
	u16 i;
	u32 rc = 0;
	struct hwrm_ring_grp_free_input req = {0};

	if (!bp->grp_info)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
			continue;
		req.ring_group_id =
			cpu_to_le32(bp->grp_info[i].fw_grp_id);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
				    struct bnxt_ring_struct *ring,
				    u32 ring_type, u32 map_index,
				    u32 stats_ctx_id)
{
	int rc = 0, err = 0;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	u16 ring_id;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);

	req.enables = 0;
	if (ring->nr_pages > 1) {
		req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	} else {
		req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		req.cmpl_ring_id =
			cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
		req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
		req.queue_id = cpu_to_le16(ring->queue_id);
		break;
	case HWRM_RING_ALLOC_RX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_AGG:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_USING_MSIX)
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
			   ring_type);
		return -1;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	err = le16_to_cpu(resp->error_code);
	ring_id = le16_to_cpu(resp->ring_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || err) {
		switch (ring_type) {
		case RING_FREE_REQ_RING_TYPE_L2_CMPL:
			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		case RING_FREE_REQ_RING_TYPE_RX:
			netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		case RING_FREE_REQ_RING_TYPE_TX:
			netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		default:
			netdev_err(bp->dev, "Invalid ring\n");
			return -1;
		}
	}
	ring->fw_ring_id = ring_id;
	return rc;
}
static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
{
	int rc;

	if (BNXT_PF(bp)) {
		struct hwrm_func_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
		req.fid = cpu_to_le16(0xffff);
		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	} else {
		struct hwrm_func_vf_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
		req.enables =
			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	}
	return rc;
}
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
	int i, rc = 0;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		cpr->cp_doorbell = bp->bar1 + i * 0x80;
		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
					      INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;

		if (!i) {
			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
			if (rc)
				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
		}
	}

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 map_idx = txr->bnapi->index;
		u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
					      map_idx, fw_stats_ctx);
		if (rc)
			goto err_out;
		txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 map_idx = rxr->bnapi->index;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
					      map_idx, INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
		rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
			struct bnxt_ring_struct *ring =
						&rxr->rx_agg_ring_struct;
			u32 grp_idx = rxr->bnapi->index;
			u32 map_idx = grp_idx + bp->rx_nr_rings;

			rc = hwrm_ring_alloc_send_msg(bp, ring,
						      HWRM_RING_ALLOC_AGG,
						      map_idx,
						      INVALID_STATS_CTX_ID);
			if (rc)
				goto err_out;

			rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
		}
	}
err_out:
	return rc;
}
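/* Note: doorbell registers live in BAR1 at a fixed 0x80-byte stride,
 * indexed by the ring's logical (MSI-X) index, which is why every ring
 * type above computes its doorbell address as bp->bar1 + map_idx * 0x80.
 */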
static int hwrm_ring_free_send_msg(struct bnxt *bp,
				   struct bnxt_ring_struct *ring,
				   u32 ring_type, int cmpl_ring_id)
{
	int rc;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
	u16 error_code;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
	req.ring_type = ring_type;
	req.ring_id = cpu_to_le16(ring->fw_ring_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	error_code = le16_to_cpu(resp->error_code);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || error_code) {
		switch (ring_type) {
		case RING_FREE_REQ_RING_TYPE_L2_CMPL:
			netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
				   rc);
			return rc;
		case RING_FREE_REQ_RING_TYPE_RX:
			netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
				   rc);
			return rc;
		case RING_FREE_REQ_RING_TYPE_TX:
			netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
				   rc);
			return rc;
		default:
			netdev_err(bp->dev, "Invalid ring\n");
			return -1;
		}
	}
	return 0;
}
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 grp_idx = txr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_TX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].rx_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].agg_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	/* The completion rings are about to be freed. After that the
	 * IRQ doorbell will not work anymore. So we need to disable
	 * IRQ here.
	 */
	bnxt_disable_int_sync(bp);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_L2_CMPL,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}
/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(fid);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);

	return rc;
}
static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	if (BNXT_VF(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
	req.num_tx_rings = cpu_to_le16(*tx_rings);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
	u32 buf_tmrs, u16 flags,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	req->flags = cpu_to_le16(flags);
	req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
	req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
	req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
	/* Minimum time between 2 interrupts set to buf_tmr x 2 */
	req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
	req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
	req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
}
int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
							   req_tx = {0}, *req;
	u16 max_buf, max_buf_irq;
	u16 buf_tmr, buf_tmr_irq;
	u32 flags;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	/* Each rx completion (2 records) should be DMAed immediately.
	 * DMA 1/4 of the completion buffers at a time.
	 */
	max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
	/* max_buf must not be zero */
	max_buf = clamp_t(u16, max_buf, 1, 63);
	max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(u16, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);

	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;

	/* RING_IDLE generates more IRQs for lower latency. Enable it only
	 * if coal_ticks is less than 25 us.
	 */
	if (bp->rx_coal_ticks < 25)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;

	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
				  buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);

	/* max_buf must not be zero */
	max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
	max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
	buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
	/* buf timer set to 1/4 of interrupt timer */
	buf_tmr = max_t(u16, buf_tmr / 4, 1);
	buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
	buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);

	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
				  buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		req = &req_rx;
		if (!bnapi->rx_ring)
			req = &req_tx;
		req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);

		rc = _hwrm_send_message(bp, req, sizeof(*req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_free_input req = {0};

	if (!bp->bnapi)
		return 0;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
			if (rc)
				break;

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);

	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	}
#endif
	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}

func_qcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_qcaps_exit;

	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
		bp->flags |= BNXT_FLAG_ROCEV2_CAP;

	bp->tx_push_thresh = 0;
	if (resp->flags &
	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = le16_to_cpu(resp->fid);
		pf->port_id = le16_to_cpu(resp->port_id);
		bp->dev->dev_port = pf->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
		pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
		if (!pf->max_hw_ring_grps)
			pf->max_hw_ring_grps = pf->max_tx_rings;
		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
		pf->max_vnics = le16_to_cpu(resp->max_vnics);
		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
		pf->max_vfs = le16_to_cpu(resp->max_vfs);
		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = le16_to_cpu(resp->fid);

		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
		vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
		if (!vf->max_hw_ring_grps)
			vf->max_hw_ring_grps = vf->max_tx_rings;
		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
		vf->max_vnics = le16_to_cpu(resp->max_vnics);
		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
		mutex_unlock(&bp->hwrm_cmd_lock);

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
		} else {
			eth_hw_addr_random(bp->dev);
			rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
		}
		return rc;
#endif
	}

hwrm_func_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
    struct hwrm_func_reset_input req = {0};

    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
    req.enables = 0;

    return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}
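/* HWRM_QUEUE_QPORTCFG reports the configurable CoS queues.  The response
 * packs (queue_id, queue_profile) byte pairs starting at queue_id0, which
 * is why the parser below walks a byte pointer two bytes per traffic
 * class.
 */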
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
    int rc = 0;
    struct hwrm_queue_qportcfg_input req = {0};
    struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
    u8 i, *qptr;

    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);

    mutex_lock(&bp->hwrm_cmd_lock);
    rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (rc)
        goto qportcfg_exit;

    if (!resp->max_configurable_queues) {
        rc = -EINVAL;
        goto qportcfg_exit;
    }
    bp->max_tc = resp->max_configurable_queues;
    bp->max_lltc = resp->max_configurable_lossless_queues;
    if (bp->max_tc > BNXT_MAX_QUEUE)
        bp->max_tc = BNXT_MAX_QUEUE;

    if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
        bp->max_tc = 1;

    if (bp->max_lltc > bp->max_tc)
        bp->max_lltc = bp->max_tc;

    qptr = &resp->queue_id0;
    for (i = 0; i < bp->max_tc; i++) {
        bp->q_info[i].queue_id = *qptr++;
        bp->q_info[i].queue_profile = *qptr++;
    }

qportcfg_exit:
    mutex_unlock(&bp->hwrm_cmd_lock);
    return rc;
}
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
    int rc;
    struct hwrm_ver_get_input req = {0};
    struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;

    bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
    req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
    req.hwrm_intf_min = HWRM_VERSION_MINOR;
    req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
    mutex_lock(&bp->hwrm_cmd_lock);
    rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (rc)
        goto hwrm_ver_get_exit;

    memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

    bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
                         resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
    if (resp->hwrm_intf_maj < 1) {
        netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
                    resp->hwrm_intf_maj, resp->hwrm_intf_min,
                    resp->hwrm_intf_upd);
        netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
    }
    snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
             resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
             resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);

    bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
    if (!bp->hwrm_cmd_timeout)
        bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

    if (resp->hwrm_intf_maj >= 1)
        bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);

    bp->chip_num = le16_to_cpu(resp->chip_num);
    if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
        !resp->chip_metal)
        bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;

hwrm_ver_get_exit:
    mutex_unlock(&bp->hwrm_cmd_lock);
    return rc;
}
int bnxt_hwrm_fw_set_time(struct bnxt *bp)
{
#if IS_ENABLED(CONFIG_RTC_LIB)
    struct hwrm_fw_set_time_input req = {0};
    struct rtc_time tm;
    struct timeval tv;

    if (bp->hwrm_spec_code < 0x10400)
        return -EOPNOTSUPP;

    do_gettimeofday(&tv);
    rtc_time_to_tm(tv.tv_sec, &tm);
    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
    req.year = cpu_to_le16(1900 + tm.tm_year);
    req.month = 1 + tm.tm_mon;
    req.day = tm.tm_mday;
    req.hour = tm.tm_hour;
    req.minute = tm.tm_min;
    req.second = tm.tm_sec;
    return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
#else
    return -EOPNOTSUPP;
#endif
}
static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
    int rc;
    struct bnxt_pf_info *pf = &bp->pf;
    struct hwrm_port_qstats_input req = {0};

    if (!(bp->flags & BNXT_FLAG_PORT_STATS))
        return 0;

    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
    req.port_id = cpu_to_le16(pf->port_id);
    req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
    req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
    rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    return rc;
}
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
    if (bp->vxlan_port_cnt) {
        bnxt_hwrm_tunnel_dst_port_free(
            bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
    }
    bp->vxlan_port_cnt = 0;
    if (bp->nge_port_cnt) {
        bnxt_hwrm_tunnel_dst_port_free(
            bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
    }
    bp->nge_port_cnt = 0;
}
static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
{
    int rc, i;
    u32 tpa_flags = 0;

    if (set_tpa)
        tpa_flags = bp->flags & BNXT_FLAG_TPA;
    for (i = 0; i < bp->nr_vnics; i++) {
        rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
        if (rc) {
            netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
                       i, rc);
            return rc;
        }
    }
    return 0;
}

static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
{
    int i;

    for (i = 0; i < bp->nr_vnics; i++)
        bnxt_hwrm_vnic_set_rss(bp, i, false);
}
static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
                                    bool irq_re_init)
{
    if (bp->vnic_info) {
        bnxt_hwrm_clear_vnic_filter(bp);
        /* clear all RSS setting before free vnic ctx */
        bnxt_hwrm_clear_vnic_rss(bp);
        bnxt_hwrm_vnic_ctx_free(bp);
        /* before free the vnic, undo the vnic tpa settings */
        if (bp->flags & BNXT_FLAG_TPA)
            bnxt_set_tpa(bp, false);
        bnxt_hwrm_vnic_free(bp);
    }
    bnxt_hwrm_ring_free(bp, close_path);
    bnxt_hwrm_ring_grp_free(bp);
    if (irq_re_init) {
        bnxt_hwrm_stat_ctx_free(bp);
        bnxt_hwrm_free_tunnel_ports(bp);
    }
}
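/* VNIC bring-up order matters: allocate the RSS/COS context(s) first,
 * then configure the vnic and its ring group, then program the RSS
 * table, and finally enable header-data split when aggregation rings are
 * in use.  Any failure unwinds through the common vnic_setup_err label.
 */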
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
    struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
    int rc;

    if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
        goto skip_rss_ctx;

    /* allocate context for vnic */
    rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
    if (rc) {
        netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
                   vnic_id, rc);
        goto vnic_setup_err;
    }
    bp->rsscos_nr_ctxs++;

    if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
        rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
        if (rc) {
            netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
                       vnic_id, rc);
            goto vnic_setup_err;
        }
        bp->rsscos_nr_ctxs++;
    }

skip_rss_ctx:
    /* configure default vnic, ring grp */
    rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
    if (rc) {
        netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
                   vnic_id, rc);
        goto vnic_setup_err;
    }

    /* Enable RSS hashing on vnic */
    rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
    if (rc) {
        netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
                   vnic_id, rc);
        goto vnic_setup_err;
    }

    if (bp->flags & BNXT_FLAG_AGG_RINGS) {
        rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
        if (rc) {
            netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
                       vnic_id, rc);
        }
    }

vnic_setup_err:
    return rc;
}
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
    int i, rc = 0;

    for (i = 0; i < bp->rx_nr_rings; i++) {
        struct bnxt_vnic_info *vnic;
        u16 vnic_id = i + 1;
        u16 ring_id = i;

        if (vnic_id >= bp->nr_vnics)
            break;

        vnic = &bp->vnic_info[vnic_id];
        vnic->flags |= BNXT_VNIC_RFS_FLAG;
        if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
            vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
        rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
        if (rc) {
            netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
                       vnic_id, rc);
            break;
        }
        rc = bnxt_setup_vnic(bp, vnic_id);
        if (rc)
            break;
    }
    return rc;
#else
    return 0;
#endif
}
/* Allow PF and VF with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
    if (BNXT_VF(bp) && !bp->vf.vlan)
        return false;
#endif
    return true;
}
static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
    unsigned int rc = 0;

    rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
    if (rc) {
        netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
                   rc);
        return rc;
    }

    rc = bnxt_hwrm_vnic_cfg(bp, 1);
    if (rc) {
        netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
                   rc);
        return rc;
    }
    return rc;
}
static int bnxt_cfg_rx_mode(struct bnxt *);
static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
    struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
    int rc = 0;
    unsigned int rx_nr_rings = bp->rx_nr_rings;

    if (irq_re_init) {
        rc = bnxt_hwrm_stat_ctx_alloc(bp);
        if (rc) {
            netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
                       rc);
            goto err_out;
        }
    }

    rc = bnxt_hwrm_ring_alloc(bp);
    if (rc) {
        netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
        goto err_out;
    }

    rc = bnxt_hwrm_ring_grp_alloc(bp);
    if (rc) {
        netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
        goto err_out;
    }

    if (BNXT_CHIP_TYPE_NITRO_A0(bp))
        rx_nr_rings--;

    /* default vnic 0 */
    rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
    if (rc) {
        netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
        goto err_out;
    }

    rc = bnxt_setup_vnic(bp, 0);
    if (rc)
        goto err_out;

    if (bp->flags & BNXT_FLAG_RFS) {
        rc = bnxt_alloc_rfs_vnics(bp);
        if (rc)
            goto err_out;
    }

    if (bp->flags & BNXT_FLAG_TPA) {
        rc = bnxt_set_tpa(bp, true);
        if (rc)
            goto err_out;
    }

    if (BNXT_VF(bp))
        bnxt_update_vf_mac(bp);

    /* Filter for default vnic 0 */
    rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
    if (rc) {
        netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
        goto err_out;
    }
    vnic->uc_filter_count = 1;

    vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

    if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
        vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

    if (bp->dev->flags & IFF_ALLMULTI) {
        vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
        vnic->mc_list_count = 0;
    } else {
        u32 mask = 0;

        bnxt_mc_list_updated(bp, &mask);
        vnic->rx_mask |= mask;
    }

    rc = bnxt_cfg_rx_mode(bp);
    if (rc)
        goto err_out;

    rc = bnxt_hwrm_set_coal(bp);
    if (rc)
        netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
                    rc);

    if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
        rc = bnxt_setup_nitroa0_vnic(bp);
        if (rc)
            netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
                       rc);
    }

    if (BNXT_VF(bp)) {
        bnxt_hwrm_func_qcfg(bp);
        netdev_update_features(bp->dev);
    }

    return 0;

err_out:
    bnxt_hwrm_resource_free(bp, 0, true);

    return rc;
}
static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
{
    bnxt_hwrm_resource_free(bp, 1, irq_re_init);
    return 0;
}

static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
{
    bnxt_init_rx_rings(bp);
    bnxt_init_tx_rings(bp);
    bnxt_init_ring_grps(bp, irq_re_init);
    bnxt_init_vnics(bp);

    return bnxt_init_chip(bp, irq_re_init);
}
static int bnxt_set_real_num_queues(struct bnxt *bp)
{
    int rc;
    struct net_device *dev = bp->dev;

    rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
                                      bp->tx_nr_rings_xdp);
    if (rc)
        return rc;

    rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
    if (rc)
        return rc;

#ifdef CONFIG_RFS_ACCEL
    if (bp->flags & BNXT_FLAG_RFS)
        dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
#endif

    return rc;
}
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
                           bool shared)
{
    int _rx = *rx, _tx = *tx;

    if (shared) {
        *rx = min_t(int, _rx, max);
        *tx = min_t(int, _tx, max);
    } else {
        if (max < 2)
            return -ENOMEM;

        while (_rx + _tx > max) {
            if (_rx > _tx && _rx > 1)
                _rx--;
            else if (_tx > 1)
                _tx--;
        }
        *rx = _rx;
        *tx = _tx;
    }
    return 0;
}
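/* Worked example (illustrative values only): with rx = 6, tx = 4 and
 * just max = 8 vectors in non-shared mode, the loop above trims the
 * larger side first: 6+4 -> 5+4 -> 4+4, leaving rx = 4, tx = 4 so that
 * rx + tx <= max.  In shared mode each count is simply capped at max.
 */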
static void bnxt_setup_msix(struct bnxt *bp)
{
    const int len = sizeof(bp->irq_tbl[0].name);
    struct net_device *dev = bp->dev;
    int tcs, i;

    tcs = netdev_get_num_tc(dev);
    if (tcs > 1) {
        int i, off, count;

        for (i = 0; i < tcs; i++) {
            count = bp->tx_nr_rings_per_tc;
            off = i * count;
            netdev_set_tc_queue(dev, i, count, off);
        }
    }

    for (i = 0; i < bp->cp_nr_rings; i++) {
        char *attr;

        if (bp->flags & BNXT_FLAG_SHARED_RINGS)
            attr = "TxRx";
        else if (i < bp->rx_nr_rings)
            attr = "rx";
        else
            attr = "tx";

        snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
                 i);
        bp->irq_tbl[i].handler = bnxt_msix;
    }
}
static void bnxt_setup_inta(struct bnxt *bp)
{
    const int len = sizeof(bp->irq_tbl[0].name);

    if (netdev_get_num_tc(bp->dev))
        netdev_reset_tc(bp->dev);

    snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
             0);
    bp->irq_tbl[0].handler = bnxt_inta;
}

static int bnxt_setup_int_mode(struct bnxt *bp)
{
    int rc;

    if (bp->flags & BNXT_FLAG_USING_MSIX)
        bnxt_setup_msix(bp);
    else
        bnxt_setup_inta(bp);

    rc = bnxt_set_real_num_queues(bp);
    return rc;
}
#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
    if (BNXT_VF(bp))
        return bp->vf.max_rsscos_ctxs;
#endif
    return bp->pf.max_rsscos_ctxs;
}

static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
    if (BNXT_VF(bp))
        return bp->vf.max_vnics;
#endif
    return bp->pf.max_vnics;
}
#endif

unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
    if (BNXT_VF(bp))
        return bp->vf.max_stat_ctxs;
#endif
    return bp->pf.max_stat_ctxs;
}

void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
{
#if defined(CONFIG_BNXT_SRIOV)
    if (BNXT_VF(bp))
        bp->vf.max_stat_ctxs = max;
    else
#endif
        bp->pf.max_stat_ctxs = max;
}

unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
    if (BNXT_VF(bp))
        return bp->vf.max_cp_rings;
#endif
    return bp->pf.max_cp_rings;
}

void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
{
#if defined(CONFIG_BNXT_SRIOV)
    if (BNXT_VF(bp))
        bp->vf.max_cp_rings = max;
    else
#endif
        bp->pf.max_cp_rings = max;
}

static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
    if (BNXT_VF(bp))
        return bp->vf.max_irqs;
#endif
    return bp->pf.max_irqs;
}

void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
#if defined(CONFIG_BNXT_SRIOV)
    if (BNXT_VF(bp))
        bp->vf.max_irqs = max_irqs;
    else
#endif
        bp->pf.max_irqs = max_irqs;
}
static int bnxt_init_msix(struct bnxt *bp)
{
    int i, total_vecs, rc = 0, min = 1;
    struct msix_entry *msix_ent;

    total_vecs = bnxt_get_max_func_irqs(bp);
    msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
    if (!msix_ent)
        return -ENOMEM;

    for (i = 0; i < total_vecs; i++) {
        msix_ent[i].entry = i;
        msix_ent[i].vector = 0;
    }

    if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
        min = 2;

    total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
    if (total_vecs < 0) {
        rc = -ENODEV;
        goto msix_setup_exit;
    }

    bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
    if (bp->irq_tbl) {
        for (i = 0; i < total_vecs; i++)
            bp->irq_tbl[i].vector = msix_ent[i].vector;

        bp->total_irqs = total_vecs;
        /* Trim rings based upon num of vectors allocated */
        rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
                             total_vecs, min == 1);
        if (rc)
            goto msix_setup_exit;

        bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
        bp->cp_nr_rings = (min == 1) ?
                          max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
                          bp->tx_nr_rings + bp->rx_nr_rings;
    } else {
        rc = -ENOMEM;
        goto msix_setup_exit;
    }
    bp->flags |= BNXT_FLAG_USING_MSIX;
    kfree(msix_ent);
    return 0;

msix_setup_exit:
    netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
    kfree(bp->irq_tbl);
    bp->irq_tbl = NULL;
    pci_disable_msix(bp->pdev);
    kfree(msix_ent);
    return rc;
}
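/* INTA fallback: with a single legacy interrupt there is no way to
 * dedicate separate vectors to rx and tx completions, so the driver
 * drops to one shared rx/tx/completion ring serviced by one vector.
 */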
static int bnxt_init_inta(struct bnxt *bp)
{
    bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
    if (!bp->irq_tbl)
        return -ENOMEM;

    bp->total_irqs = 1;
    bp->rx_nr_rings = 1;
    bp->tx_nr_rings = 1;
    bp->cp_nr_rings = 1;
    bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
    bp->flags |= BNXT_FLAG_SHARED_RINGS;
    bp->irq_tbl[0].vector = bp->pdev->irq;
    return 0;
}

static int bnxt_init_int_mode(struct bnxt *bp)
{
    int rc = 0;

    if (bp->flags & BNXT_FLAG_MSIX_CAP)
        rc = bnxt_init_msix(bp);

    if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
        /* fallback to INTA */
        rc = bnxt_init_inta(bp);
    }
    return rc;
}

static void bnxt_clear_int_mode(struct bnxt *bp)
{
    if (bp->flags & BNXT_FLAG_USING_MSIX)
        pci_disable_msix(bp->pdev);

    kfree(bp->irq_tbl);
    bp->irq_tbl = NULL;
    bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
static void bnxt_free_irq(struct bnxt *bp)
{
    struct bnxt_irq *irq;
    int i;

#ifdef CONFIG_RFS_ACCEL
    free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
    bp->dev->rx_cpu_rmap = NULL;
#endif
    if (!bp->irq_tbl)
        return;

    for (i = 0; i < bp->cp_nr_rings; i++) {
        irq = &bp->irq_tbl[i];
        if (irq->requested)
            free_irq(irq->vector, bp->bnapi[i]);
        irq->requested = 0;
    }
}
static int bnxt_request_irq(struct bnxt *bp)
{
    int i, j, rc = 0;
    unsigned long flags = 0;
#ifdef CONFIG_RFS_ACCEL
    struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
#endif

    if (!(bp->flags & BNXT_FLAG_USING_MSIX))
        flags = IRQF_SHARED;

    for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
        struct bnxt_irq *irq = &bp->irq_tbl[i];
#ifdef CONFIG_RFS_ACCEL
        if (rmap && bp->bnapi[i]->rx_ring) {
            rc = irq_cpu_rmap_add(rmap, irq->vector);
            if (rc)
                netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
                            j);
            j++;
        }
#endif
        rc = request_irq(irq->vector, irq->handler, flags, irq->name,
                         bp->bnapi[i]);
        if (rc)
            break;

        irq->requested = 1;
    }
    return rc;
}
static void bnxt_del_napi(struct bnxt *bp)
{
    int i;

    if (!bp->bnapi)
        return;

    for (i = 0; i < bp->cp_nr_rings; i++) {
        struct bnxt_napi *bnapi = bp->bnapi[i];

        napi_hash_del(&bnapi->napi);
        netif_napi_del(&bnapi->napi);
    }
    /* We called napi_hash_del() before netif_napi_del(), we need
     * to respect an RCU grace period before freeing napi structures.
     */
    synchronize_net();
}
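/* On Nitro A0 the last completion ring is reserved for the special
 * vnic, so it gets its own poll routine (bnxt_poll_nitroa0) while the
 * remaining rings use the normal bnxt_poll.
 */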
static void bnxt_init_napi(struct bnxt *bp)
{
    int i;
    unsigned int cp_nr_rings = bp->cp_nr_rings;
    struct bnxt_napi *bnapi;

    if (bp->flags & BNXT_FLAG_USING_MSIX) {
        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
            cp_nr_rings--;
        for (i = 0; i < cp_nr_rings; i++) {
            bnapi = bp->bnapi[i];
            netif_napi_add(bp->dev, &bnapi->napi,
                           bnxt_poll, 64);
        }
        if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
            bnapi = bp->bnapi[cp_nr_rings];
            netif_napi_add(bp->dev, &bnapi->napi,
                           bnxt_poll_nitroa0, 64);
        }
    } else {
        bnapi = bp->bnapi[0];
        netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
    }
}
static void bnxt_disable_napi(struct bnxt *bp)
{
    int i;

    if (!bp->bnapi)
        return;

    for (i = 0; i < bp->cp_nr_rings; i++)
        napi_disable(&bp->bnapi[i]->napi);
}

static void bnxt_enable_napi(struct bnxt *bp)
{
    int i;

    for (i = 0; i < bp->cp_nr_rings; i++) {
        bp->bnapi[i]->in_reset = false;
        napi_enable(&bp->bnapi[i]->napi);
    }
}
void bnxt_tx_disable(struct bnxt *bp)
{
    int i;
    struct bnxt_tx_ring_info *txr;
    struct netdev_queue *txq;

    if (bp->tx_ring) {
        for (i = 0; i < bp->tx_nr_rings; i++) {
            txr = &bp->tx_ring[i];
            txq = netdev_get_tx_queue(bp->dev, i);
            txr->dev_state = BNXT_DEV_STATE_CLOSING;
        }
    }
    /* Stop all TX queues */
    netif_tx_disable(bp->dev);
    netif_carrier_off(bp->dev);
}

void bnxt_tx_enable(struct bnxt *bp)
{
    int i;
    struct bnxt_tx_ring_info *txr;
    struct netdev_queue *txq;

    for (i = 0; i < bp->tx_nr_rings; i++) {
        txr = &bp->tx_ring[i];
        txq = netdev_get_tx_queue(bp->dev, i);
        txr->dev_state = 0;
    }
    netif_tx_wake_all_queues(bp->dev);
    if (bp->link_info.link_up)
        netif_carrier_on(bp->dev);
}
static void bnxt_report_link(struct bnxt *bp)
{
    if (bp->link_info.link_up) {
        const char *duplex;
        const char *flow_ctrl;
        u32 speed;
        u16 fec;

        netif_carrier_on(bp->dev);
        if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
            duplex = "full";
        else
            duplex = "half";
        if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
            flow_ctrl = "ON - receive & transmit";
        else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
            flow_ctrl = "ON - transmit";
        else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
            flow_ctrl = "ON - receive";
        else
            flow_ctrl = "none";
        speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
        netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
                    speed, duplex, flow_ctrl);
        if (bp->flags & BNXT_FLAG_EEE_CAP)
            netdev_info(bp->dev, "EEE is %s\n",
                        bp->eee.eee_active ? "active" :
                                             "not active");
        fec = bp->link_info.fec_cfg;
        if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
            netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
                        (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
                        (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
                        (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
    } else {
        netif_carrier_off(bp->dev);
        netdev_err(bp->dev, "NIC Link is Down\n");
    }
}
static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
{
    int rc = 0;
    struct hwrm_port_phy_qcaps_input req = {0};
    struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
    struct bnxt_link_info *link_info = &bp->link_info;

    if (bp->hwrm_spec_code < 0x10201)
        return 0;

    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);

    mutex_lock(&bp->hwrm_cmd_lock);
    rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (rc)
        goto hwrm_phy_qcaps_exit;

    if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
        struct ethtool_eee *eee = &bp->eee;
        u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);

        bp->flags |= BNXT_FLAG_EEE_CAP;
        eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
        bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
                         PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
        bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
                         PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
    }

    link_info->support_auto_speeds =
        le16_to_cpu(resp->supported_speeds_auto_mode);

hwrm_phy_qcaps_exit:
    mutex_unlock(&bp->hwrm_cmd_lock);
    return rc;
}
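/* bnxt_update_link() refreshes the cached link state from
 * HWRM_PORT_PHY_QCFG.  When chng_link_state is true it also latches the
 * new link-up/down state and logs transitions via bnxt_report_link().
 */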
static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
{
    int rc = 0;
    struct bnxt_link_info *link_info = &bp->link_info;
    struct hwrm_port_phy_qcfg_input req = {0};
    struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
    u8 link_up = link_info->link_up;
    u16 diff;

    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);

    mutex_lock(&bp->hwrm_cmd_lock);
    rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (rc) {
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
    }

    memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
    link_info->phy_link_status = resp->link;
    link_info->duplex = resp->duplex;
    link_info->pause = resp->pause;
    link_info->auto_mode = resp->auto_mode;
    link_info->auto_pause_setting = resp->auto_pause;
    link_info->lp_pause = resp->link_partner_adv_pause;
    link_info->force_pause_setting = resp->force_pause;
    link_info->duplex_setting = resp->duplex;
    if (link_info->phy_link_status == BNXT_LINK_LINK)
        link_info->link_speed = le16_to_cpu(resp->link_speed);
    else
        link_info->link_speed = 0;
    link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
    link_info->support_speeds = le16_to_cpu(resp->support_speeds);
    link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
    link_info->lp_auto_link_speeds =
        le16_to_cpu(resp->link_partner_adv_speeds);
    link_info->preemphasis = le32_to_cpu(resp->preemphasis);
    link_info->phy_ver[0] = resp->phy_maj;
    link_info->phy_ver[1] = resp->phy_min;
    link_info->phy_ver[2] = resp->phy_bld;
    link_info->media_type = resp->media_type;
    link_info->phy_type = resp->phy_type;
    link_info->transceiver = resp->xcvr_pkg_type;
    link_info->phy_addr = resp->eee_config_phy_addr &
                          PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
    link_info->module_status = resp->module_status;

    if (bp->flags & BNXT_FLAG_EEE_CAP) {
        struct ethtool_eee *eee = &bp->eee;
        u16 fw_speeds;

        eee->eee_active = 0;
        if (resp->eee_config_phy_addr &
            PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
            eee->eee_active = 1;
            fw_speeds = le16_to_cpu(
                resp->link_partner_adv_eee_link_speed_mask);
            eee->lp_advertised =
                _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
        }

        /* Pull initial EEE config */
        if (!chng_link_state) {
            if (resp->eee_config_phy_addr &
                PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
                eee->eee_enabled = 1;

            fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
            eee->advertised =
                _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);

            if (resp->eee_config_phy_addr &
                PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
                __le32 tmr;

                eee->tx_lpi_enabled = 1;
                tmr = resp->xcvr_identifier_type_tx_lpi_timer;
                eee->tx_lpi_timer = le32_to_cpu(tmr) &
                    PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
            }
        }
    }

    link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
    if (bp->hwrm_spec_code >= 0x10504)
        link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);

    /* TODO: need to add more logic to report VF link */
    if (chng_link_state) {
        if (link_info->phy_link_status == BNXT_LINK_LINK)
            link_info->link_up = 1;
        else
            link_info->link_up = 0;
        if (link_up != link_info->link_up)
            bnxt_report_link(bp);
    } else {
        /* always link down if not required to update link state */
        link_info->link_up = 0;
    }
    mutex_unlock(&bp->hwrm_cmd_lock);

    diff = link_info->support_auto_speeds ^ link_info->advertising;
    if ((link_info->support_auto_speeds | diff) !=
        link_info->support_auto_speeds) {
        /* An advertised speed is no longer supported, so we need to
         * update the advertisement settings.  Caller holds RTNL
         * so we can modify link settings.
         */
        link_info->advertising = link_info->support_auto_speeds;
        if (link_info->autoneg & BNXT_AUTONEG_SPEED)
            bnxt_hwrm_set_link_setting(bp, true, false);
    }
    return 0;
}
static void bnxt_get_port_module_status(struct bnxt *bp)
{
    struct bnxt_link_info *link_info = &bp->link_info;
    struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
    u8 module_status;

    if (bnxt_update_link(bp, true))
        return;

    module_status = link_info->module_status;
    switch (module_status) {
    case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
    case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
    case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
        netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
                    bp->pf.port_id);
        if (bp->hwrm_spec_code >= 0x10201) {
            netdev_warn(bp->dev, "Module part number %s\n",
                        resp->phy_vendor_partnumber);
        }
        if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
            netdev_warn(bp->dev, "TX is disabled\n");
        if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
            netdev_warn(bp->dev, "SFP+ module is shutdown\n");
    }
}
static void
bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
    if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
        if (bp->hwrm_spec_code >= 0x10201)
            req->auto_pause =
                PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
        if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
            req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
        if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
            req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
        req->enables |=
            cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
    } else {
        if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
            req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
        if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
            req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
        req->enables |=
            cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
        if (bp->hwrm_spec_code >= 0x10201) {
            req->auto_pause = req->force_pause;
            req->enables |= cpu_to_le32(
                PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
        }
    }
}
static void bnxt_hwrm_set_link_common(struct bnxt *bp,
                                      struct hwrm_port_phy_cfg_input *req)
{
    u8 autoneg = bp->link_info.autoneg;
    u16 fw_link_speed = bp->link_info.req_link_speed;
    u16 advertising = bp->link_info.advertising;

    if (autoneg & BNXT_AUTONEG_SPEED) {
        req->auto_mode |=
            PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;

        req->enables |= cpu_to_le32(
            PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
        req->auto_link_speed_mask = cpu_to_le16(advertising);

        req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
        req->flags |=
            cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
    } else {
        req->force_link_speed = cpu_to_le16(fw_link_speed);
        req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
    }

    /* tell chimp that the setting takes effect immediately */
    req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
}
int bnxt_hwrm_set_pause(struct bnxt *bp)
{
    struct hwrm_port_phy_cfg_input req = {0};
    int rc;

    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
    bnxt_hwrm_set_pause_common(bp, &req);

    if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
        bp->link_info.force_link_chng)
        bnxt_hwrm_set_link_common(bp, &req);

    mutex_lock(&bp->hwrm_cmd_lock);
    rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
        /* since changing of pause setting doesn't trigger any link
         * change event, the driver needs to update the current pause
         * result upon successful return of the phy_cfg command
         */
        bp->link_info.pause =
        bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
        bp->link_info.auto_pause_setting = 0;
        if (!bp->link_info.force_link_chng)
            bnxt_report_link(bp);
    }
    bp->link_info.force_link_chng = false;
    mutex_unlock(&bp->hwrm_cmd_lock);
    return rc;
}
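/* EEE is expressed to the firmware as flags plus an advertised speed
 * mask; tx_lpi is a sub-option that is only meaningful while EEE itself
 * is enabled, hence the paired enable/disable flags below.
 */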
static void bnxt_hwrm_set_eee(struct bnxt *bp,
                              struct hwrm_port_phy_cfg_input *req)
{
    struct ethtool_eee *eee = &bp->eee;

    if (eee->eee_enabled) {
        u16 eee_speeds;
        u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;

        if (eee->tx_lpi_enabled)
            flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
        else
            flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;

        req->flags |= cpu_to_le32(flags);
        eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
        req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
        req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
    } else {
        req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
    }
}
int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
{
    struct hwrm_port_phy_cfg_input req = {0};

    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
    if (set_pause)
        bnxt_hwrm_set_pause_common(bp, &req);

    bnxt_hwrm_set_link_common(bp, &req);

    if (set_eee)
        bnxt_hwrm_set_eee(bp, &req);
    return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
{
    struct hwrm_port_phy_cfg_input req = {0};

    if (!BNXT_SINGLE_PF(bp))
        return 0;

    if (pci_num_vf(bp->pdev))
        return 0;

    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
    req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
    return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
    struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
    struct hwrm_port_led_qcaps_input req = {0};
    struct bnxt_pf_info *pf = &bp->pf;
    int rc;

    if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
        return 0;

    bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
    req.port_id = cpu_to_le16(pf->port_id);
    mutex_lock(&bp->hwrm_cmd_lock);
    rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
    if (rc) {
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
    }
    if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
        int i;

        bp->num_leds = resp->num_leds;
        memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
                                         bp->num_leds);
        for (i = 0; i < bp->num_leds; i++) {
            struct bnxt_led_info *led = &bp->leds[i];
            __le16 caps = led->led_state_caps;

            if (!led->led_group_id ||
                !BNXT_LED_ALT_BLINK_CAP(caps)) {
                bp->num_leds = 0;
                break;
            }
        }
    }
    mutex_unlock(&bp->hwrm_cmd_lock);
    return 0;
}
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
    struct ethtool_eee *eee = &bp->eee;
    struct bnxt_link_info *link_info = &bp->link_info;

    if (!(bp->flags & BNXT_FLAG_EEE_CAP))
        return true;

    if (eee->eee_enabled) {
        u32 advertising =
            _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);

        if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
            eee->eee_enabled = 0;
            return false;
        }
        if (eee->advertised & ~advertising) {
            eee->advertised = advertising & eee->supported;
            return false;
        }
    }
    return true;
}
static int bnxt_update_phy_setting(struct bnxt *bp)
{
    int rc;
    bool update_link = false;
    bool update_pause = false;
    bool update_eee = false;
    struct bnxt_link_info *link_info = &bp->link_info;

    rc = bnxt_update_link(bp, true);
    if (rc) {
        netdev_err(bp->dev, "failed to update link (rc: %x)\n",
                   rc);
        return rc;
    }
    if (!BNXT_SINGLE_PF(bp))
        return 0;

    if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
        (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
        link_info->req_flow_ctrl)
        update_pause = true;
    if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
        link_info->force_pause_setting != link_info->req_flow_ctrl)
        update_pause = true;
    if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
        if (BNXT_AUTO_MODE(link_info->auto_mode))
            update_link = true;
        if (link_info->req_link_speed != link_info->force_link_speed)
            update_link = true;
        if (link_info->req_duplex != link_info->duplex_setting)
            update_link = true;
    } else {
        if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
            update_link = true;
        if (link_info->advertising != link_info->auto_link_speeds)
            update_link = true;
    }

    /* The last close may have shutdown the link, so need to call
     * PHY_CFG to bring it back up.
     */
    if (!netif_carrier_ok(bp->dev))
        update_link = true;

    if (!bnxt_eee_config_ok(bp))
        update_eee = true;

    if (update_link)
        rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
    else if (update_pause)
        rc = bnxt_hwrm_set_pause(bp);
    if (rc) {
        netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
                   rc);
        return rc;
    }

    return rc;
}
/* Common routine to pre-map certain register block to different GRC window.
 * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
 * in PF and 3 windows in VF that can be customized to map in different
 * register blocks.
 */
static void bnxt_preset_reg_win(struct bnxt *bp)
{
    if (BNXT_PF(bp)) {
        /* CAG registers map to GRC window #4 */
        writel(BNXT_CAG_REG_BASE,
               bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
    }
}
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
    int rc = 0;

    bnxt_preset_reg_win(bp);
    netif_carrier_off(bp->dev);
    if (irq_re_init) {
        rc = bnxt_setup_int_mode(bp);
        if (rc) {
            netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
                       rc);
            return rc;
        }
    }
    if ((bp->flags & BNXT_FLAG_RFS) &&
        !(bp->flags & BNXT_FLAG_USING_MSIX)) {
        /* disable RFS if falling back to INTA */
        bp->dev->hw_features &= ~NETIF_F_NTUPLE;
        bp->flags &= ~BNXT_FLAG_RFS;
    }

    rc = bnxt_alloc_mem(bp, irq_re_init);
    if (rc) {
        netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
        goto open_err_free_mem;
    }

    if (irq_re_init) {
        bnxt_init_napi(bp);
        rc = bnxt_request_irq(bp);
        if (rc) {
            netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
            goto open_err;
        }
    }

    bnxt_enable_napi(bp);

    rc = bnxt_init_nic(bp, irq_re_init);
    if (rc) {
        netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
        goto open_err;
    }

    if (link_re_init) {
        rc = bnxt_update_phy_setting(bp);
        if (rc)
            netdev_warn(bp->dev, "failed to update phy settings\n");
    }

    if (irq_re_init)
        udp_tunnel_get_rx_info(bp->dev);

    set_bit(BNXT_STATE_OPEN, &bp->state);
    bnxt_enable_int(bp);
    /* Enable TX queues */
    bnxt_tx_enable(bp);
    mod_timer(&bp->timer, jiffies + bp->current_interval);
    /* Poll link status and check for SFP+ module status */
    bnxt_get_port_module_status(bp);

    return 0;

open_err:
    bnxt_disable_napi(bp);
    bnxt_del_napi(bp);

open_err_free_mem:
    bnxt_free_skbs(bp);
    bnxt_free_irq(bp);
    bnxt_free_mem(bp, true);
    return rc;
}
/* rtnl_lock held */
int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
    int rc = 0;

    rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
    if (rc) {
        netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
        dev_close(bp->dev);
    }
    return rc;
}

static int bnxt_open(struct net_device *dev)
{
    struct bnxt *bp = netdev_priv(dev);

    return __bnxt_open_nic(bp, true, true);
}
int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
    int rc = 0;

#ifdef CONFIG_BNXT_SRIOV
    if (bp->sriov_cfg) {
        rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
                                              !bp->sriov_cfg,
                                              BNXT_SRIOV_CFG_WAIT_TMO);
        if (rc)
            netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
    }
#endif
    /* Change device state to avoid TX queue wake up's */
    bnxt_tx_disable(bp);

    clear_bit(BNXT_STATE_OPEN, &bp->state);
    smp_mb__after_atomic();
    while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
        msleep(20);

    /* Flush rings and disable interrupts */
    bnxt_shutdown_nic(bp, irq_re_init);

    /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */

    bnxt_disable_napi(bp);
    del_timer_sync(&bp->timer);
    bnxt_free_skbs(bp);

    if (irq_re_init) {
        bnxt_free_irq(bp);
        bnxt_del_napi(bp);
    }
    bnxt_free_mem(bp, irq_re_init);
    return rc;
}
static int bnxt_close(struct net_device *dev)
{
    struct bnxt *bp = netdev_priv(dev);

    bnxt_close_nic(bp, true, true);
    bnxt_hwrm_shutdown_link(bp);
    return 0;
}
/* rtnl_lock held */
static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    struct mii_ioctl_data *mdio = if_mii(ifr);
    struct bnxt *bp = netdev_priv(dev);
    int rc;

    switch (cmd) {
    case SIOCGMIIPHY:
        mdio->phy_id = bp->link_info.phy_addr;

        /* fallthru */
    case SIOCGMIIREG: {
        u16 mii_regval = 0;

        if (!netif_running(dev))
            return -EAGAIN;

        rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
                                     &mii_regval);
        mdio->val_out = mii_regval;
        return rc;
    }

    case SIOCSMIIREG:
        if (!netif_running(dev))
            return -EAGAIN;

        return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
                                        mdio->val_in);

    default:
        /* do nothing */
        break;
    }
    return -EOPNOTSUPP;
}
static void
bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
    u32 i;
    struct bnxt *bp = netdev_priv(dev);

    if (!bp->bnapi)
        return;

    /* TODO check if we need to synchronize with bnxt_close path */
    for (i = 0; i < bp->cp_nr_rings; i++) {
        struct bnxt_napi *bnapi = bp->bnapi[i];
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct ctx_hw_stats *hw_stats = cpr->hw_stats;

        stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
        stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
        stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);

        stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
        stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
        stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);

        stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
        stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
        stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);

        stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
        stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
        stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);

        stats->rx_missed_errors +=
            le64_to_cpu(hw_stats->rx_discard_pkts);

        stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);

        stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
    }

    if (bp->flags & BNXT_FLAG_PORT_STATS) {
        struct rx_port_stats *rx = bp->hw_rx_port_stats;
        struct tx_port_stats *tx = bp->hw_tx_port_stats;

        stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
        stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
        stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
                                  le64_to_cpu(rx->rx_ovrsz_frames) +
                                  le64_to_cpu(rx->rx_runt_frames);
        stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
                           le64_to_cpu(rx->rx_jbr_frames);
        stats->collisions = le64_to_cpu(tx->tx_total_collisions);
        stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
        stats->tx_errors = le64_to_cpu(tx->tx_err);
    }
}
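/* The two helpers below diff the netdev multicast/unicast lists against
 * the copies cached in vnic 0.  They only report whether reprogramming
 * is needed; the actual HWRM calls happen later in bnxt_cfg_rx_mode().
 */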
static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
{
    struct net_device *dev = bp->dev;
    struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
    struct netdev_hw_addr *ha;
    u8 *haddr;
    int mc_count = 0;
    bool update = false;
    int off = 0;

    netdev_for_each_mc_addr(ha, dev) {
        if (mc_count >= BNXT_MAX_MC_ADDRS) {
            *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
            vnic->mc_list_count = 0;
            return false;
        }
        haddr = ha->addr;
        if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
            memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
            update = true;
        }
        off += ETH_ALEN;
        mc_count++;
    }
    if (mc_count)
        *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;

    if (mc_count != vnic->mc_list_count) {
        vnic->mc_list_count = mc_count;
        update = true;
    }
    return update;
}
static bool bnxt_uc_list_updated(struct bnxt *bp)
{
    struct net_device *dev = bp->dev;
    struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
    struct netdev_hw_addr *ha;
    int off = 0;

    if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
        return true;

    netdev_for_each_uc_addr(ha, dev) {
        if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
            return true;

        off += ETH_ALEN;
    }
    return false;
}
static void bnxt_set_rx_mode(struct net_device *dev)
{
    struct bnxt *bp = netdev_priv(dev);
    struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
    u32 mask = vnic->rx_mask;
    bool mc_update = false;
    bool uc_update;

    if (!netif_running(dev))
        return;

    mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
              CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
              CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);

    if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
        mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

    uc_update = bnxt_uc_list_updated(bp);

    if (dev->flags & IFF_ALLMULTI) {
        mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
        vnic->mc_list_count = 0;
    } else {
        mc_update = bnxt_mc_list_updated(bp, &mask);
    }

    if (mask != vnic->rx_mask || uc_update || mc_update) {
        vnic->rx_mask = mask;

        set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
        schedule_work(&bp->sp_task);
    }
}
static int bnxt_cfg_rx_mode(struct bnxt *bp)
{
    struct net_device *dev = bp->dev;
    struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
    struct netdev_hw_addr *ha;
    int i, off = 0, rc;
    bool uc_update;

    netif_addr_lock_bh(dev);
    uc_update = bnxt_uc_list_updated(bp);
    netif_addr_unlock_bh(dev);

    if (!uc_update)
        goto skip_uc;

    mutex_lock(&bp->hwrm_cmd_lock);
    for (i = 1; i < vnic->uc_filter_count; i++) {
        struct hwrm_cfa_l2_filter_free_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
                               -1);

        req.l2_filter_id = vnic->fw_l2_filter_id[i];

        rc = _hwrm_send_message(bp, &req, sizeof(req),
                                HWRM_CMD_TIMEOUT);
    }
    mutex_unlock(&bp->hwrm_cmd_lock);

    vnic->uc_filter_count = 1;

    netif_addr_lock_bh(dev);
    if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
        vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
    } else {
        netdev_for_each_uc_addr(ha, dev) {
            memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
            off += ETH_ALEN;
            vnic->uc_filter_count++;
        }
    }
    netif_addr_unlock_bh(dev);

    for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
        rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
        if (rc) {
            netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
                       rc);
            vnic->uc_filter_count = i;
            return rc;
        }
    }

skip_uc:
    rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
    if (rc)
        netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
                   rc);

    return rc;
}
/* If the chip and firmware support RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
    if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
        return true;
    if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
        return true;
    return false;
}
/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
    int vnics, max_vnics, max_rss_ctxs;

    if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
        return false;

    vnics = 1 + bp->rx_nr_rings;
    max_vnics = bnxt_get_max_func_vnics(bp);
    max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);

    /* RSS contexts not a limiting factor */
    if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
        max_rss_ctxs = max_vnics;
    if (vnics > max_vnics || vnics > max_rss_ctxs) {
        netdev_warn(bp->dev,
                    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
                    min(max_rss_ctxs - 1, max_vnics - 1));
        return false;
    }

    return true;
#else
    return false;
#endif
}
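/* The "1 + rx_nr_rings" budget above mirrors bnxt_alloc_rfs_vnics():
 * vnic 0 is the default vnic, and aRFS needs one extra vnic (and one RSS
 * context, unless BNXT_FLAG_NEW_RSS_CAP lifts that limit) per rx ring.
 */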
static netdev_features_t bnxt_fix_features(struct net_device *dev,
                                           netdev_features_t features)
{
    struct bnxt *bp = netdev_priv(dev);

    if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
        features &= ~NETIF_F_NTUPLE;

    /* Both CTAG and STAG VLAN acceleration on the RX side have to be
     * turned on or off together.
     */
    if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
        (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
        if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
            features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
                          NETIF_F_HW_VLAN_STAG_RX);
        else
            features |= NETIF_F_HW_VLAN_CTAG_RX |
                        NETIF_F_HW_VLAN_STAG_RX;
    }
#ifdef CONFIG_BNXT_SRIOV
    if (BNXT_VF(bp)) {
        if (bp->vf.vlan) {
            features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
                          NETIF_F_HW_VLAN_STAG_RX);
        }
    }
#endif
    return features;
}
static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
{
    struct bnxt *bp = netdev_priv(dev);
    u32 flags = bp->flags;
    u32 changes;
    int rc = 0;
    bool re_init = false;
    bool update_tpa = false;

    flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
    if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
        flags |= BNXT_FLAG_GRO;
    if (features & NETIF_F_LRO)
        flags |= BNXT_FLAG_LRO;

    if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
        flags &= ~BNXT_FLAG_TPA;

    if (features & NETIF_F_HW_VLAN_CTAG_RX)
        flags |= BNXT_FLAG_STRIP_VLAN;

    if (features & NETIF_F_NTUPLE)
        flags |= BNXT_FLAG_RFS;

    changes = flags ^ bp->flags;
    if (changes & BNXT_FLAG_TPA) {
        update_tpa = true;
        if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
            (flags & BNXT_FLAG_TPA) == 0)
            re_init = true;
    }

    if (changes & ~BNXT_FLAG_TPA)
        re_init = true;

    if (flags != bp->flags) {
        u32 old_flags = bp->flags;

        bp->flags = flags;

        if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
            if (update_tpa)
                bnxt_set_ring_params(bp);
            return rc;
        }

        if (re_init) {
            bnxt_close_nic(bp, false, false);
            if (update_tpa)
                bnxt_set_ring_params(bp);

            return bnxt_open_nic(bp, false, false);
        }
        if (update_tpa) {
            rc = bnxt_set_tpa(bp,
                              (flags & BNXT_FLAG_TPA) ?
                              true : false);
            if (rc)
                bp->flags = old_flags;
        }
    }
    return rc;
}
static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
{
    struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
    int i = bnapi->index;

    if (!txr)
        return;

    netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
                i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
                txr->tx_cons);
}

static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
{
    struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
    int i = bnapi->index;

    if (!rxr)
        return;

    netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
                i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
                rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
                rxr->rx_sw_agg_prod);
}

static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
{
    struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
    int i = bnapi->index;

    netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
                i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
}

static void bnxt_dbg_dump_states(struct bnxt *bp)
{
    int i;
    struct bnxt_napi *bnapi;

    for (i = 0; i < bp->cp_nr_rings; i++) {
        bnapi = bp->bnapi[i];
        if (netif_msg_drv(bp)) {
            bnxt_dump_tx_sw_state(bnapi);
            bnxt_dump_rx_sw_state(bnapi);
            bnxt_dump_cp_sw_state(bnapi);
        }
    }
}
static void bnxt_reset_task(struct bnxt *bp, bool silent)
{
    if (!silent)
        bnxt_dbg_dump_states(bp);
    if (netif_running(bp->dev)) {
        bnxt_close_nic(bp, false, false);
        bnxt_open_nic(bp, false, false);
    }
}
static void bnxt_tx_timeout(struct net_device *dev)
{
    struct bnxt *bp = netdev_priv(dev);

    netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
    set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
    schedule_work(&bp->sp_task);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bnxt_poll_controller(struct net_device *dev)
{
    struct bnxt *bp = netdev_priv(dev);
    int i;

    for (i = 0; i < bp->cp_nr_rings; i++) {
        struct bnxt_irq *irq = &bp->irq_tbl[i];

        disable_irq(irq->vector);
        irq->handler(irq->vector, bp->bnapi[i]);
        enable_irq(irq->vector);
    }
}
#endif
static void bnxt_timer(unsigned long data)
{
    struct bnxt *bp = (struct bnxt *)data;
    struct net_device *dev = bp->dev;

    if (!netif_running(dev))
        return;

    if (atomic_read(&bp->intr_sem) != 0)
        goto bnxt_restart_timer;

    if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
        set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
        schedule_work(&bp->sp_task);
    }
bnxt_restart_timer:
    mod_timer(&bp->timer, jiffies + bp->current_interval);
}
static void bnxt_rtnl_lock_sp(struct bnxt *bp)
{
    /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
     * set.  If the device is being closed, bnxt_close() may be holding
     * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
     * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
     */
    clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
    rtnl_lock();
}

static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
{
    set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
    rtnl_unlock();
}
/* Only called from bnxt_sp_task() */
static void bnxt_reset(struct bnxt *bp, bool silent)
{
    bnxt_rtnl_lock_sp(bp);
    if (test_bit(BNXT_STATE_OPEN, &bp->state))
        bnxt_reset_task(bp, silent);
    bnxt_rtnl_unlock_sp(bp);
}
static void bnxt_cfg_ntp_filters(struct bnxt *);

static void bnxt_sp_task(struct work_struct *work)
{
    struct bnxt *bp = container_of(work, struct bnxt, sp_task);

    set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
    smp_mb__after_atomic();
    if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
        clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
        return;
    }

    if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
        bnxt_cfg_rx_mode(bp);

    if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
        bnxt_cfg_ntp_filters(bp);
    if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
        bnxt_hwrm_exec_fwd_req(bp);
    if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
        bnxt_hwrm_tunnel_dst_port_alloc(
            bp, bp->vxlan_port,
            TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
    }
    if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
        bnxt_hwrm_tunnel_dst_port_free(
            bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
    }
    if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
        bnxt_hwrm_tunnel_dst_port_alloc(
            bp, bp->nge_port,
            TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
    }
    if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
        bnxt_hwrm_tunnel_dst_port_free(
            bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
    }
    if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
        bnxt_hwrm_port_qstats(bp);

    /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
     * must be the last functions to be called before exiting.
     */
    if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
        int rc = 0;

        if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
                               &bp->sp_event))
            bnxt_hwrm_phy_qcaps(bp);

        bnxt_rtnl_lock_sp(bp);
        if (test_bit(BNXT_STATE_OPEN, &bp->state))
            rc = bnxt_update_link(bp, true);
        bnxt_rtnl_unlock_sp(bp);
        if (rc)
            netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
                       rc);
    }
    if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
        bnxt_rtnl_lock_sp(bp);
        if (test_bit(BNXT_STATE_OPEN, &bp->state))
            bnxt_get_port_module_status(bp);
        bnxt_rtnl_unlock_sp(bp);
    }
    if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
        bnxt_reset(bp, false);

    if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
        bnxt_reset(bp, true);

    smp_mb__before_atomic();
    clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
/* Under rtnl_lock */
int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp)
{
    int max_rx, max_tx, tx_sets = 1;
    int tx_rings_needed;
    bool sh = true;
    int rc;

    if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
        sh = false;

    if (tcs)
        tx_sets = tcs;

    rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
    if (rc)
        return rc;

    if (max_rx < rx)
        return -ENOMEM;

    tx_rings_needed = tx * tx_sets + tx_xdp;
    if (max_tx < tx_rings_needed)
        return -ENOMEM;

    if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
        tx_rings_needed < (tx * tx_sets + tx_xdp))
        return -ENOMEM;
    return 0;
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
{
    if (bp->bar2) {
        pci_iounmap(pdev, bp->bar2);
        bp->bar2 = NULL;
    }

    if (bp->bar1) {
        pci_iounmap(pdev, bp->bar1);
        bp->bar1 = NULL;
    }

    if (bp->bar0) {
        pci_iounmap(pdev, bp->bar0);
        bp->bar0 = NULL;
    }
}

static void bnxt_cleanup_pci(struct bnxt *bp)
{
    bnxt_unmap_bars(bp, bp->pdev);
    pci_release_regions(bp->pdev);
    pci_disable_device(bp->pdev);
}
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
    int rc;
    struct bnxt *bp = netdev_priv(dev);

    SET_NETDEV_DEV(dev, &pdev->dev);

    /* enable device (incl. PCI PM wakeup), and bus-mastering */
    rc = pci_enable_device(pdev);
    if (rc) {
        dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
        goto init_err;
    }

    if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
        dev_err(&pdev->dev,
                "Cannot find PCI device base address, aborting\n");
        rc = -ENODEV;
        goto init_err_disable;
    }

    rc = pci_request_regions(pdev, DRV_MODULE_NAME);
    if (rc) {
        dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
        goto init_err_disable;
    }

    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
        dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
        dev_err(&pdev->dev, "System does not support DMA, aborting\n");
        rc = -EIO;
        goto init_err_disable;
    }

    pci_set_master(pdev);

    bp->dev = dev;
    bp->pdev = pdev;

    bp->bar0 = pci_ioremap_bar(pdev, 0);
    if (!bp->bar0) {
        dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
        rc = -ENOMEM;
        goto init_err_release;
    }

    bp->bar1 = pci_ioremap_bar(pdev, 2);
    if (!bp->bar1) {
        dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
        rc = -ENOMEM;
        goto init_err_release;
    }

    bp->bar2 = pci_ioremap_bar(pdev, 4);
    if (!bp->bar2) {
        dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
        rc = -ENOMEM;
        goto init_err_release;
    }

    pci_enable_pcie_error_reporting(pdev);

    INIT_WORK(&bp->sp_task, bnxt_sp_task);

    spin_lock_init(&bp->ntp_fltr_lock);

    bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
    bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;

    /* tick values in micro seconds */
    bp->rx_coal_ticks = 12;
    bp->rx_coal_bufs = 30;
    bp->rx_coal_ticks_irq = 1;
    bp->rx_coal_bufs_irq = 2;

    bp->tx_coal_ticks = 25;
    bp->tx_coal_bufs = 30;
    bp->tx_coal_ticks_irq = 2;
    bp->tx_coal_bufs_irq = 2;

    bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;

    init_timer(&bp->timer);
    bp->timer.data = (unsigned long)bp;
    bp->timer.function = bnxt_timer;
    bp->current_interval = BNXT_TIMER_INTERVAL;

    clear_bit(BNXT_STATE_OPEN, &bp->state);
    return 0;

init_err_release:
    bnxt_unmap_bars(bp, pdev);
    pci_release_regions(pdev);

init_err_disable:
    pci_disable_device(pdev);

init_err:
    return rc;
}
/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
    struct sockaddr *addr = p;
    struct bnxt *bp = netdev_priv(dev);
    int rc = 0;

    if (!is_valid_ether_addr(addr->sa_data))
        return -EADDRNOTAVAIL;

    rc = bnxt_approve_mac(bp, addr->sa_data);
    if (rc)
        return rc;

    if (ether_addr_equal(addr->sa_data, dev->dev_addr))
        return 0;

    memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
    if (netif_running(dev)) {
        bnxt_close_nic(bp, false, false);
        rc = bnxt_open_nic(bp, false, false);
    }

    return rc;
}
/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnxt *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	dev->mtu = new_mtu;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
	struct bnxt *bp = netdev_priv(dev);
	bool sh = false;
	int rc;

	if (tc > bp->max_tc) {
		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
			   tc, bp->max_tc);
		return -EINVAL;
	}

	if (netdev_get_num_tc(dev) == tc)
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;

	rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
				tc, bp->tx_nr_rings_xdp);
	if (rc)
		return rc;

	/* Needs to close the device and do hw resource re-allocations */
	if (netif_running(bp->dev))
		bnxt_close_nic(bp, true, false);

	if (tc) {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
		netdev_set_num_tc(dev, tc);
	} else {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
		netdev_reset_tc(dev);
	}
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;

	if (netif_running(bp->dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}
static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			 struct tc_to_netdev *ntc)
{
	if (ntc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return bnxt_setup_mq_tc(dev, ntc->tc);
}
#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
			    struct bnxt_ntuple_filter *f2)
{
	struct flow_keys *keys1 = &f1->fkeys;
	struct flow_keys *keys2 = &f2->fkeys;

	if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
	    keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
	    keys1->ports.ports == keys2->ports.ports &&
	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
	    keys1->basic.n_proto == keys2->basic.n_proto &&
	    keys1->control.flags == keys2->control.flags &&
	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
		return true;

	return false;
}
static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			      u16 rxq_index, u32 flow_id)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ntuple_filter *fltr, *new_fltr;
	struct flow_keys *fkeys;
	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
	int rc = 0, idx, bit_id, l2_idx = 0;
	struct hlist_head *head;

	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
		int off = 0, j;

		netif_addr_lock_bh(dev);
		for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
			if (ether_addr_equal(eth->h_dest,
					     vnic->uc_list + off)) {
				l2_idx = j + 1;
				break;
			}
		}
		netif_addr_unlock_bh(dev);
		if (!l2_idx)
			return -EINVAL;
	}
	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
	if (!new_fltr)
		return -ENOMEM;

	fkeys = &new_fltr->fkeys;
	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
	if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
	    bp->hwrm_spec_code < 0x10601) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
	if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
	    bp->hwrm_spec_code < 0x10601) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);

	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
	head = &bp->ntp_fltr_hash_tbl[idx];
	rcu_read_lock();
	hlist_for_each_entry_rcu(fltr, head, hash) {
		if (bnxt_fltr_match(fltr, new_fltr)) {
			rcu_read_unlock();
			rc = 0;
			goto err_free;
		}
	}
	rcu_read_unlock();

	spin_lock_bh(&bp->ntp_fltr_lock);
	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
					 BNXT_NTP_FLTR_MAX_FLTR, 0);
	if (bit_id < 0) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		rc = -ENOMEM;
		goto err_free;
	}

	new_fltr->sw_id = (u16)bit_id;
	new_fltr->flow_id = flow_id;
	new_fltr->l2_fltr_idx = l2_idx;
	new_fltr->rxq = rxq_index;
	hlist_add_head_rcu(&new_fltr->hash, head);
	bp->ntp_fltr_count++;
	spin_unlock_bh(&bp->ntp_fltr_lock);

	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
	schedule_work(&bp->sp_task);

	return new_fltr->sw_id;

err_free:
	kfree(new_fltr);
	return rc;
}
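
/* Runs from the sp_task workqueue (BNXT_RX_NTP_FLTR_SP_EVENT): walks the
 * software filter table, programs newly added filters into the NIC, and
 * frees entries whose flows the RPS core reports as expired.
 */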
static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
	int i;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;
		int rc;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			bool del = false;

			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
				if (rps_may_expire_flow(bp->dev, fltr->rxq,
							fltr->flow_id,
							fltr->sw_id)) {
					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
					del = true;
				}
			} else {
				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
								       fltr);
				if (rc)
					del = true;
				else
					set_bit(BNXT_FLTR_VALID, &fltr->state);
			}

			if (del) {
				spin_lock_bh(&bp->ntp_fltr_lock);
				hlist_del_rcu(&fltr->hash);
				bp->ntp_fltr_count--;
				spin_unlock_bh(&bp->ntp_fltr_lock);
				synchronize_rcu();
				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
				kfree(fltr);
			}
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Receive PF driver unload event!");
}
#else

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
}

#endif /* CONFIG_RFS_ACCEL */
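
/* UDP tunnel offload: the NIC can parse one VXLAN and one GENEVE destination
 * port for inner-packet checksum/TSO/RSS.  The ndo handlers below only
 * refcount the port and set an event bit; the actual HWRM firmware calls
 * are deferred to sp_task context.
 */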
static void bnxt_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
			return;

		bp->vxlan_port_cnt++;
		if (bp->vxlan_port_cnt == 1) {
			bp->vxlan_port = ti->port;
			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
			schedule_work(&bp->sp_task);
		}
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (bp->nge_port_cnt && bp->nge_port != ti->port)
			return;

		bp->nge_port_cnt++;
		if (bp->nge_port_cnt == 1) {
			bp->nge_port = ti->port;
			set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
		}
		break;
	default:
		return;
	}

	schedule_work(&bp->sp_task);
}
static void bnxt_udp_tunnel_del(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
			return;
		bp->vxlan_port_cnt--;

		if (bp->vxlan_port_cnt != 0)
			return;

		set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!bp->nge_port_cnt || bp->nge_port != ti->port)
			return;
		bp->nge_port_cnt--;

		if (bp->nge_port_cnt != 0)
			return;

		set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	default:
		return;
	}

	schedule_work(&bp->sp_task);
}
static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_do_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnxt_poll_controller,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
	.ndo_xdp		= bnxt_xdp,
};
static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);

	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);
	cancel_work_sync(&bp->sp_task);
	bp->sp_event = 0;

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_dcb_free(bp);
	kfree(bp->edev);
	bp->edev = NULL;
	if (bp->xdp_prog)
		bpf_prog_put(bp->xdp_prog);
	bnxt_cleanup_pci(bp);
	free_netdev(dev);
}
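
/* Probe-time PHY setup: query PHY capabilities and current link state from
 * firmware, then seed the ethtool settings copy (autoneg, advertised
 * speeds, flow control) from the NVM-configured values so they are sane
 * before the first "ethtool -s".
 */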
static int bnxt_probe_phy(struct bnxt *bp)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}

	rc = bnxt_update_link(bp, false);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	/* initialize the ethtool setting copy with NVM settings */
	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
		link_info->autoneg = BNXT_AUTONEG_SPEED;
		if (bp->hwrm_spec_code >= 0x10201) {
			if (link_info->auto_pause_setting &
			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		} else {
			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		}
		link_info->advertising = link_info->auto_link_speeds;
	} else {
		link_info->req_link_speed = link_info->force_link_speed;
		link_info->req_duplex = link_info->duplex_setting;
	}
	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		link_info->req_flow_ctrl =
			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
	else
		link_info->req_flow_ctrl = link_info->force_pause_setting;
	return rc;
}
static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}
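
/* Ring/IRQ budgeting.  PFs and VFs read their limits from different
 * firmware-provided structures; completion rings are further capped by
 * available IRQs and stat contexts.  With aggregation rings enabled every
 * RX queue consumes two hardware rings, hence the halving of *max_rx.
 */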
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	int max_ring_grps = 0;

#ifdef CONFIG_BNXT_SRIOV
	if (!BNXT_PF(bp)) {
		*max_tx = bp->vf.max_tx_rings;
		*max_rx = bp->vf.max_rx_rings;
		*max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
		*max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
		max_ring_grps = bp->vf.max_hw_ring_grps;
	} else
#endif
	{
		*max_tx = bp->pf.max_tx_rings;
		*max_rx = bp->pf.max_rx_rings;
		*max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
		*max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
		max_ring_grps = bp->pf.max_hw_ring_grps;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}
int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	if (!rx || !tx || !cp)
		return -ENOMEM;

	*max_rx = rx;
	*max_tx = tx;
	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}
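
/* Default ring selection below degrades gracefully: if the full request
 * does not fit, aggregation rings (and with them hardware LRO) are dropped
 * first; on RoCE-capable parts a minimum share of completion rings, IRQs
 * and stat contexts is also held back for the RDMA ULP before trimming.
 */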
static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc)
			return rc;
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~NETIF_F_LRO;
		bp->dev->features &= ~NETIF_F_LRO;
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}
static int bnxt_set_dflt_rings(struct bnxt *bp)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;
	bool sh = true;

	bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = netif_get_num_default_rss_queues();
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);

	rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");

	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;
	bp->num_stat_ctxs = bp->cp_nr_rings;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}
void bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);
	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
}
static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;

	if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
	else
		netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
			    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
			    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
			    speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
			    "Unknown", width);
}
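
/* Main PCI probe entry point.  Rough sequence: map the device
 * (bnxt_init_board), establish the HWRM firmware channel, query
 * capabilities, pick default ring counts and RSS config, probe the PHY,
 * set up interrupts, then register the netdev.  Any failure unwinds
 * through the init_err_* labels.
 */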
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_hwrm_fw_set_time(bp);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 60 - 9500 */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = BNXT_MAX_MTU;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
	bp->gro_func = bnxt_gro_func_5730x;
	if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
		bp->gro_func = bnxt_gro_func_5731x;

	rc = bnxt_hwrm_func_drv_rgtr(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
	if (rc)
		goto init_err_pci_clean;

	bp->ulp_probe = bnxt_ulp_probe;

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}

	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}

	bnxt_hwrm_func_qcfg(bp);
	bnxt_hwrm_port_led_qcaps(bp);

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	bnxt_set_max_func_irqs(bp, max_irqs);
	rc = bnxt_set_dflt_rings(bp);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	/* Default RSS hash cfg. */
	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	if (!BNXT_CHIP_NUM_57X0X(bp->chip_num) &&
	    !BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    bp->hwrm_spec_code >= 0x10501) {
		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	}

	bnxt_hwrm_vnic_qcaps(bp);
	if (bnxt_rfs_supported(bp)) {
		dev->hw_features |= NETIF_F_NTUPLE;
		if (bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_probe_phy(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_hwrm_func_reset(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = register_netdev(dev);
	if (rc)
		goto init_err_clr_int;

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);

	bnxt_parse_log_pcie_link(bp);

	return 0;

init_err_clr_int:
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_cleanup_pci(bp);

init_err_free:
	free_netdev(dev);
	return rc;
}
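
/* PCI AER recovery callbacks.  The core calls error_detected first; if it
 * returns PCI_ERS_RESULT_NEED_RESET, slot_reset runs after the link has
 * been reset, and resume is invoked once traffic may flow again.
 */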
/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err); /* non-fatal, continue */
	}

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}
static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure	= bnxt_sriov_configure,
#endif
};
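
/* module_pci_driver() expands to the standard module_init()/module_exit()
 * boilerplate that calls pci_register_driver() and pci_unregister_driver()
 * on bnxt_pci_driver, so no explicit init/exit functions are needed.
 */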
module_pci_driver(bnxt_pci_driver);