/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>

#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_devlink.h"
#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164
/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
static const u16 bnxt_vf_req_snif[] = {
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static struct workqueue_struct *bnxt_pf_wq;
static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF);
}
#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)					\
		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)					\
		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)
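/* TX length hint lookup table, indexed by packet length in 512-byte
 * units (see the "length >>= 9" step in bnxt_start_xmit()).  Entries
 * for 2KB and larger all share the 2048_AND_LARGER hint.
 */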
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}
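/* Main transmit routine.  Small packets that fit within the TX push
 * threshold are written inline through the doorbell BAR when the ring
 * is otherwise empty; everything else takes the normal DMA-mapped BD
 * path (see the free_size == bp->tx_ring_size test below).
 */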
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void *pdata = tx_push_buf->data;
		void *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
			__iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
					 push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (skb->xmit_more && !tx_buf->is_push)
			bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
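/* Reclaim completed TX buffers: unmap DMA for non-push packets, free
 * the skbs, and wake the queue (under the TX lock) once enough
 * descriptors are available again.
 */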
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
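/* RX buffer allocation comes in two flavors: whole pages when the ring
 * is in page mode (used for XDP), or kmalloc'd data buffers otherwise.
 * Both return the DMA address through *mapping.
 */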
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		__free_page(page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}
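/* The aggregation ring tracks buffer ownership with a bitmap; find the
 * next free slot, wrapping to the beginning when the search hits the
 * end of the map.
 */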
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}
static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}
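/* Build an skb from a page-mode RX buffer: copy the header payload into
 * the linear area and attach the rest of the page as a frag.
 */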
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	struct skb_frag_struct *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);

	if (unlikely(!payload))
		payload = eth_get_headlen(data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	frag->page_offset += payload;
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u32 *raw_cons, void *cmp)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}
static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_cancel_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		flush_workqueue(bnxt_pf_wq);
	else
		cancel_work_sync(&bp->sp_task);
}
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}
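/* A TPA_START completion opens a hardware TCP aggregation.  The current
 * RX buffer is parked in rxr->rx_tpa[agg_id] until the matching TPA_END
 * completion arrives, and the producer slot is refilled from the
 * tpa_info's previous buffer.
 */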
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}
static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u16 cp_cons, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}
#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
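/* 5730x variant of the GRO fixup: the payload offset from the TPA_END
 * completion is combined with the fixed IPv4/IPv6 + TCP header sizes to
 * locate the network and transport headers, since this chip does not
 * provide the parsed header offsets that bnxt_gro_func_5731x() reads
 * from hdr_info.
 */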
static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}
static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}
/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}
*bnxt_tpa_end(struct bnxt
*bp
,
1339 struct bnxt_napi
*bnapi
,
1341 struct rx_tpa_end_cmp
*tpa_end
,
1342 struct rx_tpa_end_cmp_ext
*tpa_end1
,
1345 struct bnxt_cp_ring_info
*cpr
= &bnapi
->cp_ring
;
1346 struct bnxt_rx_ring_info
*rxr
= bnapi
->rx_ring
;
1347 u8 agg_id
= TPA_END_AGG_ID(tpa_end
);
1348 u8
*data_ptr
, agg_bufs
;
1349 u16 cp_cons
= RING_CMP(*raw_cons
);
1351 struct bnxt_tpa_info
*tpa_info
;
1353 struct sk_buff
*skb
;
1356 if (unlikely(bnapi
->in_reset
)) {
1357 int rc
= bnxt_discard_rx(bp
, bnapi
, raw_cons
, tpa_end
);
1360 return ERR_PTR(-EBUSY
);
1364 tpa_info
= &rxr
->rx_tpa
[agg_id
];
1365 data
= tpa_info
->data
;
1366 data_ptr
= tpa_info
->data_ptr
;
1368 len
= tpa_info
->len
;
1369 mapping
= tpa_info
->mapping
;
1371 agg_bufs
= (le32_to_cpu(tpa_end
->rx_tpa_end_cmp_misc_v1
) &
1372 RX_TPA_END_CMP_AGG_BUFS
) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT
;
1375 if (!bnxt_agg_bufs_valid(bp
, cpr
, agg_bufs
, raw_cons
))
1376 return ERR_PTR(-EBUSY
);
1378 *event
|= BNXT_AGG_EVENT
;
1379 cp_cons
= NEXT_CMP(cp_cons
);
1382 if (unlikely(agg_bufs
> MAX_SKB_FRAGS
|| TPA_END_ERRORS(tpa_end1
))) {
1383 bnxt_abort_tpa(bp
, bnapi
, cp_cons
, agg_bufs
);
1384 if (agg_bufs
> MAX_SKB_FRAGS
)
1385 netdev_warn(bp
->dev
, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1386 agg_bufs
, (int)MAX_SKB_FRAGS
);
1390 if (len
<= bp
->rx_copy_thresh
) {
1391 skb
= bnxt_copy_skb(bnapi
, data_ptr
, len
, mapping
);
1393 bnxt_abort_tpa(bp
, bnapi
, cp_cons
, agg_bufs
);
1398 dma_addr_t new_mapping
;
1400 new_data
= __bnxt_alloc_rx_data(bp
, &new_mapping
, GFP_ATOMIC
);
1402 bnxt_abort_tpa(bp
, bnapi
, cp_cons
, agg_bufs
);
1406 tpa_info
->data
= new_data
;
1407 tpa_info
->data_ptr
= new_data
+ bp
->rx_offset
;
1408 tpa_info
->mapping
= new_mapping
;
1410 skb
= build_skb(data
, 0);
1411 dma_unmap_single_attrs(&bp
->pdev
->dev
, mapping
,
1412 bp
->rx_buf_use_size
, bp
->rx_dir
,
1413 DMA_ATTR_WEAK_ORDERING
);
1417 bnxt_abort_tpa(bp
, bnapi
, cp_cons
, agg_bufs
);
1420 skb_reserve(skb
, bp
->rx_offset
);
1425 skb
= bnxt_rx_pages(bp
, bnapi
, skb
, cp_cons
, agg_bufs
);
1427 /* Page reuse already handled by bnxt_rx_pages(). */
1433 eth_type_trans(skb
, bnxt_get_pkt_dev(bp
, tpa_info
->cfa_code
));
1435 if (tpa_info
->hash_type
!= PKT_HASH_TYPE_NONE
)
1436 skb_set_hash(skb
, tpa_info
->rss_hash
, tpa_info
->hash_type
);
1438 if ((tpa_info
->flags2
& RX_CMP_FLAGS2_META_FORMAT_VLAN
) &&
1439 (skb
->dev
->features
& NETIF_F_HW_VLAN_CTAG_RX
)) {
1440 u16 vlan_proto
= tpa_info
->metadata
>>
1441 RX_CMP_FLAGS2_METADATA_TPID_SFT
;
1442 u16 vtag
= tpa_info
->metadata
& RX_CMP_FLAGS2_METADATA_TCI_MASK
;
1444 __vlan_hwaccel_put_tag(skb
, htons(vlan_proto
), vtag
);
1447 skb_checksum_none_assert(skb
);
1448 if (likely(tpa_info
->flags2
& RX_TPA_START_CMP_FLAGS2_L4_CS_CALC
)) {
1449 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1451 (tpa_info
->flags2
& RX_CMP_FLAGS2_T_L4_CS_CALC
) >> 3;
1454 if (TPA_END_GRO(tpa_end
))
1455 skb
= bnxt_gro_skb(bp
, tpa_info
, tpa_end
, tpa_end1
, skb
);
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
			     struct sk_buff *skb)
{
	if (skb->dev != bp->dev) {
		/* this packet belongs to a vf-rep */
		bnxt_vf_rep_rx(bp, skb);
		return;
	}
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}
/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
		       u8 *event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data_ptr, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *data;
	int rc = 0;
	u32 misc;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		if (IS_ERR(skb))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnxt_deliver_skb(bp, bnapi, skb);
			rc = 1;
		}
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);

		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	}
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
		rc = 1;
		goto next_rx;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		u32 payload;

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		else
			payload = 0;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
				      payload | len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				cpr->rx_l4_csum_errors++;
		}
	}

	bnxt_deliver_skb(bp, bnapi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

	cpr->rx_packets += 1;
	cpr->rx_bytes += len;

next_rx_no_prod_no_len:
	*raw_cons = tmp_raw_cons;

	return rc;
}
/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
				 u32 *raw_cons, u8 *event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;
	u16 cp_cons;
	u8 cmp_type;

	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	}
	return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
}
#define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;

		/* print unsupported speed warning in forced speed mode only */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
		    (data1 & 0x20000)) {
			u16 fw_speed = link_info->force_link_speed;
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			if (speed != SPEED_UNKNOWN)
				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
					    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
	}
	/* fall through */
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (BNXT_VF(bp))
			break;

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	default:
		goto async_event_process_exit;
	}
	bnxt_queue_sp_work(bp);
async_event_process_exit:
	bnxt_ulp_async_events(bp, cmpl);
	return 0;
}
static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
				(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);
		break;

	default:
		break;
	}

	return 0;
}
bnxt_msix(int irq
, void *dev_instance
)
1808 struct bnxt_napi
*bnapi
= dev_instance
;
1809 struct bnxt
*bp
= bnapi
->bp
;
1810 struct bnxt_cp_ring_info
*cpr
= &bnapi
->cp_ring
;
1811 u32 cons
= RING_CMP(cpr
->cp_raw_cons
);
1814 prefetch(&cpr
->cp_desc_ring
[CP_RING(cons
)][CP_IDX(cons
)]);
1815 napi_schedule(&bnapi
->napi
);
static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	u32 raw_cons = cpr->cp_raw_cons;
	u16 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp;

	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	return TX_CMP_VALID(txcmp, raw_cons);
}
static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);
	u32 int_status;

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

	if (!bnxt_has_work(bp, cpr)) {
		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* return if erroneous interrupt */
		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
			return IRQ_NONE;
	}

	/* disable ring IRQ */
	BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}
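/* Core NAPI poll loop over the combined completion ring.  TX, RX, and
 * HWRM completions are demultiplexed by TX_CMP_TYPE(); ring doorbells
 * are only written after the completion ring has been ACKed.
 */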
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 cons;
	int tx_pkts = 0;
	int rx_pkts = 0;
	u8 event = 0;
	struct tx_cmp *txcmp;

	while (1) {
		int rc;

		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
			tx_pkts++;
			/* return full budget so NAPI will complete. */
			if (unlikely(tx_pkts > bp->tx_wake_thresh))
				rx_pkts = budget;
		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			if (likely(budget))
				rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
			else
				rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
							   &event);
			if (likely(rc >= 0))
				rx_pkts += rc;
			/* Increment rx_pkts when rc is -ENOMEM to count towards
			 * the NAPI budget.  Otherwise, we may potentially loop
			 * here forever if we consistently cannot allocate
			 * buffers.
			 */
			else if (rc == -ENOMEM && budget)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_DONE) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
			bnxt_hwrm_handler(bp, txcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	if (event & BNXT_TX_EVENT) {
		struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
		void __iomem *db = txr->tx_doorbell;
		u16 prod = txr->tx_prod;

		/* Sync BD data before updating doorbell */
		wmb();

		bnxt_db_write(bp, db, DB_KEY_TX | prod);
	}

	cpr->cp_raw_cons = raw_cons;
	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);

	if (tx_pkts)
		bnapi->tx_int(bp, bnapi, tx_pkts);

	if (event & BNXT_RX_EVENT) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
		if (event & BNXT_AGG_EVENT)
			bnxt_db_write(bp, rxr->rx_agg_doorbell,
				      DB_KEY_RX | rxr->rx_agg_prod);
	}
	return rx_pkts;
}
static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct tx_cmp *txcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 cp_cons, tmp_raw_cons;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 rx_pkts = 0;
	u8 event = 0;

	while (1) {
		int rc;

		cp_cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
			cp_cons = RING_CMP(tmp_raw_cons);
			rxcmp1 = (struct rx_cmp_ext *)
			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
				break;

			/* force an error to recycle the buffer */
			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);

			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
			if (likely(rc == -EIO) && budget)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
				    CMPL_BASE_TYPE_HWRM_DONE)) {
			bnxt_hwrm_handler(bp, txcmp);
		} else {
			netdev_err(bp->dev,
				   "Invalid completion received on special ring\n");
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
	bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);

	if (event & BNXT_AGG_EVENT)
		bnxt_db_write(bp, rxr->rx_agg_doorbell,
			      DB_KEY_RX | rxr->rx_agg_prod);

	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
		napi_complete_done(napi, rx_pkts);
		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
	return rx_pkts;
}
static int bnxt_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int work_done = 0;

	while (1) {
		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);

		if (work_done >= budget)
			break;

		if (!bnxt_has_work(bp, cpr)) {
			if (napi_complete_done(napi, work_done))
				BNXT_CP_DB_REARM(cpr->cp_doorbell,
						 cpr->cp_raw_cons);
			break;
		}
	}
	if (bp->flags & BNXT_FLAG_DIM) {
		struct net_dim_sample dim_sample;

		net_dim_sample(cpr->event_ctr,
			       cpr->rx_packets,
			       cpr->rx_bytes,
			       &dim_sample);
		net_dim(&cpr->dim, dim_sample);
	}
	return work_done;
}
static void bnxt_free_tx_skbs(struct bnxt *bp)
{
	int i, max_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		int j;

		for (j = 0; j < max_idx;) {
			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (!skb) {
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			if (tx_buf->is_push) {
				dev_kfree_skb(skb);
				j += 2;
				continue;
			}

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			last = tx_buf->nr_frags;
			j += 2;
			for (k = 0; k < last; k++, j++) {
				int ring_idx = j & bp->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(
					&pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}
static void bnxt_free_rx_skbs(struct bnxt *bp)
{
	int i, max_idx, max_agg_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->rx_ring)
		return;

	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		int j;

		if (rxr->rx_tpa) {
			for (j = 0; j < MAX_TPA; j++) {
				struct bnxt_tpa_info *tpa_info =
							&rxr->rx_tpa[j];
				u8 *data = tpa_info->data;

				if (!data)
					continue;

				dma_unmap_single_attrs(&pdev->dev,
						       tpa_info->mapping,
						       bp->rx_buf_use_size,
						       bp->rx_dir,
						       DMA_ATTR_WEAK_ORDERING);

				tpa_info->data = NULL;

				kfree(data);
			}
		}

		for (j = 0; j < max_idx; j++) {
			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
			dma_addr_t mapping = rx_buf->mapping;
			void *data = rx_buf->data;

			if (!data)
				continue;

			rx_buf->data = NULL;

			if (BNXT_RX_PAGE_MODE(bp)) {
				mapping -= bp->rx_dma_offset;
				dma_unmap_page_attrs(&pdev->dev, mapping,
						     PAGE_SIZE, bp->rx_dir,
						     DMA_ATTR_WEAK_ORDERING);
				__free_page(data);
			} else {
				dma_unmap_single_attrs(&pdev->dev, mapping,
						       bp->rx_buf_use_size,
						       bp->rx_dir,
						       DMA_ATTR_WEAK_ORDERING);
				kfree(data);
			}
		}

		for (j = 0; j < max_agg_idx; j++) {
			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
						&rxr->rx_agg_ring[j];
			struct page *page = rx_agg_buf->page;

			if (!page)
				continue;

			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
					     BNXT_RX_PAGE_SIZE,
					     PCI_DMA_FROMDEVICE,
					     DMA_ATTR_WEAK_ORDERING);

			rx_agg_buf->page = NULL;
			__clear_bit(j, rxr->rx_agg_bmap);

			__free_page(page);
		}
		if (rxr->rx_page) {
			__free_page(rxr->rx_page);
			rxr->rx_page = NULL;
		}
	}
}
static void bnxt_free_skbs(struct bnxt *bp)
{
	bnxt_free_tx_skbs(bp);
	bnxt_free_rx_skbs(bp);
}
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	for (i = 0; i < ring->nr_pages; i++) {
		if (!ring->pg_arr[i])
			continue;

		dma_free_coherent(&pdev->dev, ring->page_size,
				  ring->pg_arr[i], ring->dma_arr[i]);

		ring->pg_arr[i] = NULL;
	}
	if (ring->pg_tbl) {
		dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
				  ring->pg_tbl, ring->pg_tbl_map);
		ring->pg_tbl = NULL;
	}
	if (ring->vmem_size && *ring->vmem) {
		vfree(*ring->vmem);
		*ring->vmem = NULL;
	}
}

static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (ring->nr_pages > 1) {
		ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
						  ring->nr_pages * 8,
						  &ring->pg_tbl_map,
						  GFP_KERNEL);
		if (!ring->pg_tbl)
			return -ENOMEM;
	}

	for (i = 0; i < ring->nr_pages; i++) {
		ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     ring->page_size,
						     &ring->dma_arr[i],
						     GFP_KERNEL);
		if (!ring->pg_arr[i])
			return -ENOMEM;

		if (ring->nr_pages > 1)
			ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
	}

	if (ring->vmem_size) {
		*ring->vmem = vzalloc(ring->vmem_size);
		if (!(*ring->vmem))
			return -ENOMEM;
	}
	return 0;
}
static void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;

	if (!bp->rx_ring)
		return;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		if (rxr->xdp_prog)
			bpf_prog_put(rxr->xdp_prog);

		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
			xdp_rxq_info_unreg(&rxr->xdp_rxq);

		kfree(rxr->rx_tpa);
		rxr->rx_tpa = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnxt_free_ring(bp, ring);

		ring = &rxr->rx_agg_ring_struct;
		bnxt_free_ring(bp, ring);
	}
}

static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
	int i, rc, agg_rings = 0, tpa_rings = 0;

	if (!bp->rx_ring)
		return -ENOMEM;

	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		agg_rings = 1;

	if (bp->flags & BNXT_FLAG_TPA)
		tpa_rings = 1;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &rxr->rx_ring_struct;

		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
		if (rc < 0)
			return rc;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;

		if (agg_rings) {
			u16 mem_size;

			ring = &rxr->rx_agg_ring_struct;
			rc = bnxt_alloc_ring(bp, ring);
			if (rc)
				return rc;

			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
			mem_size = rxr->rx_agg_bmap_size / 8;
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
			if (!rxr->rx_agg_bmap)
				return -ENOMEM;

			if (tpa_rings) {
				rxr->rx_tpa = kcalloc(MAX_TPA,
						sizeof(struct bnxt_tpa_info),
						GFP_KERNEL);
				if (!rxr->rx_tpa)
					return -ENOMEM;
			}
		}
	}
	return 0;
}
static void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		if (txr->tx_push) {
			dma_free_coherent(&pdev->dev, bp->tx_push_size,
					  txr->tx_push, txr->tx_push_mapping);
			txr->tx_push = NULL;
		}

		ring = &txr->tx_ring_struct;

		bnxt_free_ring(bp, ring);
	}
}

static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
	int i, j, rc;
	struct pci_dev *pdev = bp->pdev;

	bp->tx_push_size = 0;
	if (bp->tx_push_thresh) {
		int push_size;

		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
					bp->tx_push_thresh);

		if (push_size > 256) {
			push_size = 0;
			bp->tx_push_thresh = 0;
		}

		bp->tx_push_size = push_size;
	}

	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &txr->tx_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;

		if (bp->tx_push_size) {
			dma_addr_t mapping;

			/* One pre-allocated DMA buffer to backup
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
						bp->tx_push_size,
						&txr->tx_push_mapping,
						GFP_KERNEL);

			if (!txr->tx_push)
				return -ENOMEM;

			mapping = txr->tx_push_mapping +
				sizeof(struct tx_push_bd);
			txr->data_mapping = cpu_to_le64(mapping);

			memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
		}
		ring->queue_id = bp->q_info[j].queue_id;
		if (i < bp->tx_nr_rings_xdp)
			continue;
		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;
}
static void bnxt_free_cp_rings(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		bnxt_free_ring(bp, ring);
	}
}

static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
	int i, rc;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		rc = bnxt_alloc_ring(bp, ring);
		if (rc)
			return rc;
	}
	return 0;
}
static void bnxt_init_ring_struct(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;
		ring->nr_pages = bp->cp_nr_pages;
		ring->page_size = HW_CMPD_RING_SIZE;
		ring->pg_arr = (void **)cpr->cp_desc_ring;
		ring->dma_arr = cpr->cp_desc_mapping;
		ring->vmem_size = 0;

		rxr = bnapi->rx_ring;
		if (!rxr)
			goto skip_rx;

		ring = &rxr->rx_ring_struct;
		ring->nr_pages = bp->rx_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)rxr->rx_desc_ring;
		ring->dma_arr = rxr->rx_desc_mapping;
		ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
		ring->vmem = (void **)&rxr->rx_buf_ring;

		ring = &rxr->rx_agg_ring_struct;
		ring->nr_pages = bp->rx_agg_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
		ring->dma_arr = rxr->rx_agg_desc_mapping;
		ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
		ring->vmem = (void **)&rxr->rx_agg_ring;

skip_rx:
		txr = bnapi->tx_ring;
		if (!txr)
			continue;

		ring = &txr->tx_ring_struct;
		ring->nr_pages = bp->tx_nr_pages;
		ring->page_size = HW_RXBD_RING_SIZE;
		ring->pg_arr = (void **)txr->tx_desc_ring;
		ring->dma_arr = txr->tx_desc_mapping;
		ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
		ring->vmem = (void **)&txr->tx_buf_ring;
	}
}

static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
{
	int i;
	u32 prod;
	struct rx_bd **rx_buf_ring;

	rx_buf_ring = (struct rx_bd **)ring->pg_arr;
	for (i = 0, prod = 0; i < ring->nr_pages; i++) {
		int j;
		struct rx_bd *rxbd;

		rxbd = rx_buf_ring[i];
		if (!rxbd)
			continue;

		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
			rxbd->rx_bd_opaque = prod;
		}
	}
}
static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct net_device *dev = bp->dev;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring_struct *ring;
	u32 prod, type;
	int i;

	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	rxr = &bp->rx_ring[ring_nr];
	ring = &rxr->rx_ring_struct;
	bnxt_init_rxbd_pages(ring, type);

	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
		rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
		if (IS_ERR(rxr->xdp_prog)) {
			int rc = PTR_ERR(rxr->xdp_prog);

			rxr->xdp_prog = NULL;
			return rc;
		}
	}
	prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX(prod);
	}
	rxr->rx_prod = prod;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

	bnxt_init_rxbd_pages(ring, type);

	prod = rxr->rx_agg_prod;
	for (i = 0; i < bp->rx_agg_ring_size; i++) {
		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;

	if (bp->flags & BNXT_FLAG_TPA) {
		if (rxr->rx_tpa) {
			u8 *data;
			dma_addr_t mapping;

			for (i = 0; i < MAX_TPA; i++) {
				data = __bnxt_alloc_rx_data(bp, &mapping,
							    GFP_KERNEL);
				if (!data)
					return -ENOMEM;

				rxr->rx_tpa[i].data = data;
				rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
				rxr->rx_tpa[i].mapping = mapping;
			}
		} else {
			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
			return -ENOMEM;
		}
	}

	return 0;
}
static void bnxt_init_cp_rings(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
	}
}

static int bnxt_init_rx_rings(struct bnxt *bp)
{
	int i, rc = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
	} else {
		bp->rx_offset = BNXT_RX_OFFSET;
		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		rc = bnxt_init_one_rx_ring(bp, i);
		if (rc)
			break;
	}

	return rc;
}

static int bnxt_init_tx_rings(struct bnxt *bp)
{
	u16 i;

	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
				   MAX_SKB_FRAGS + 1);

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	return 0;
}

static void bnxt_free_ring_grps(struct bnxt *bp)
{
	kfree(bp->grp_info);
	bp->grp_info = NULL;
}
static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
{
	int i;

	if (irq_re_init) {
		bp->grp_info = kcalloc(bp->cp_nr_rings,
				       sizeof(struct bnxt_ring_grp_info),
				       GFP_KERNEL);
		if (!bp->grp_info)
			return -ENOMEM;
	}
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (irq_re_init)
			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
	return 0;
}

static void bnxt_free_vnics(struct bnxt *bp)
{
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;
}

static int bnxt_alloc_vnics(struct bnxt *bp)
{
	int num_vnics = 1;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
		num_vnics += bp->rx_nr_rings;
#endif

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		num_vnics++;

	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
				GFP_KERNEL);
	if (!bp->vnic_info)
		return -ENOMEM;

	bp->nr_vnics = num_vnics;
	return 0;
}

static void bnxt_init_vnics(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		vnic->fw_vnic_id = INVALID_HW_RING_ID;
		vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
		vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;

		if (bp->vnic_info[i].rss_hash_key) {
			if (i == 0)
				prandom_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			else
				memcpy(vnic->rss_hash_key,
				       bp->vnic_info[0].rss_hash_key,
				       HW_HASH_KEY_SIZE);
		}
	}
}
static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
{
	int pages;

	pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;

	pages++;

	while (pages & (pages - 1))
		pages++;

	return pages;
}
void bnxt_set_tpa_flags(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_TPA;
	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
		return;
	if (bp->dev->features & NETIF_F_LRO)
		bp->flags |= BNXT_FLAG_LRO;
	else if (bp->dev->features & NETIF_F_GRO_HW)
		bp->flags |= BNXT_FLAG_GRO;
}
/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
 * be set on entry.
 */
void bnxt_set_ring_params(struct bnxt *bp)
{
	u32 ring_size, rx_size, rx_space;
	u32 agg_factor = 0, agg_ring_size = 0;

	/* 8 for CRC and VLAN */
	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);

	rx_space = rx_size + NET_SKB_PAD +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
	ring_size = bp->rx_ring_size;
	bp->rx_agg_ring_size = 0;
	bp->rx_agg_nr_pages = 0;

	if (bp->flags & BNXT_FLAG_TPA)
		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);

	bp->flags &= ~BNXT_FLAG_JUMBO;
	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
		u32 jumbo_factor;

		bp->flags |= BNXT_FLAG_JUMBO;
		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
		if (jumbo_factor > agg_factor)
			agg_factor = jumbo_factor;
	}
	agg_ring_size = ring_size * agg_factor;

	if (agg_ring_size) {
		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
							RX_DESC_CNT);
		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
			u32 tmp = agg_ring_size;

			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
				    tmp, agg_ring_size);
		}
		bp->rx_agg_ring_size = agg_ring_size;
		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
		rx_space = rx_size + NET_SKB_PAD +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	bp->rx_buf_use_size = rx_size;
	bp->rx_buf_size = rx_space;

	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;

	ring_size = bp->tx_ring_size;
	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;

	ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
	bp->cp_ring_size = ring_size;

	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
	if (bp->cp_nr_pages > MAX_CP_PAGES) {
		bp->cp_nr_pages = MAX_CP_PAGES;
		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
			    ring_size, bp->cp_ring_size);
	}
	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
	bp->cp_ring_mask = bp->cp_bit - 1;
}
/* Changing allocation mode of RX rings.
 * TODO: Update when extending xdp_rxq_info to support allocation modes.
 */
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
	if (page_mode) {
		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
			return -EOPNOTSUPP;
		bp->dev->max_mtu =
			min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
		bp->rx_dir = DMA_BIDIRECTIONAL;
		bp->rx_skb_func = bnxt_rx_page_skb;
		/* Disable LRO or GRO_HW */
		netdev_update_features(bp->dev);
	} else {
		bp->dev->max_mtu = bp->max_mtu;
		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
		bp->rx_dir = DMA_FROM_DEVICE;
		bp->rx_skb_func = bnxt_rx_skb;
	}
	return 0;
}
static void bnxt_free_vnic_attributes(struct bnxt *bp)
{
	int i;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->vnic_info)
		return;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		kfree(vnic->fw_grp_ids);
		vnic->fw_grp_ids = NULL;

		kfree(vnic->uc_list);
		vnic->uc_list = NULL;

		if (vnic->mc_list) {
			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
					  vnic->mc_list, vnic->mc_list_mapping);
			vnic->mc_list = NULL;
		}

		if (vnic->rss_table) {
			dma_free_coherent(&pdev->dev, PAGE_SIZE,
					  vnic->rss_table,
					  vnic->rss_table_dma_addr);
			vnic->rss_table = NULL;
		}

		vnic->rss_hash_key = NULL;
		vnic->flags = 0;
	}
}

static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
{
	int i, rc = 0, size;
	struct bnxt_vnic_info *vnic;
	struct pci_dev *pdev = bp->pdev;
	int max_rings;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;

			if (mem_size > 0) {
				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
				if (!vnic->uc_list) {
					rc = -ENOMEM;
					goto out;
				}
			}
		}

		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
			vnic->mc_list =
				dma_alloc_coherent(&pdev->dev,
						   vnic->mc_list_size,
						   &vnic->mc_list_mapping,
						   GFP_KERNEL);
			if (!vnic->mc_list) {
				rc = -ENOMEM;
				goto out;
			}
		}

		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
			max_rings = bp->rx_nr_rings;
		else
			max_rings = 1;

		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
		if (!vnic->fw_grp_ids) {
			rc = -ENOMEM;
			goto out;
		}

		if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
			continue;

		/* Allocate rss table and hash key */
		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						     &vnic->rss_table_dma_addr,
						     GFP_KERNEL);
		if (!vnic->rss_table) {
			rc = -ENOMEM;
			goto out;
		}

		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));

		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
	}
	return 0;

out:
	return rc;
}
static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
			  bp->hwrm_cmd_resp_dma_addr);

	bp->hwrm_cmd_resp_addr = NULL;
	if (bp->hwrm_dbg_resp_addr) {
		dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
				  bp->hwrm_dbg_resp_addr,
				  bp->hwrm_dbg_resp_dma_addr);

		bp->hwrm_dbg_resp_addr = NULL;
	}
}

static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						   &bp->hwrm_cmd_resp_dma_addr,
						   GFP_KERNEL);
	if (!bp->hwrm_cmd_resp_addr)
		return -ENOMEM;
	bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
						    HWRM_DBG_REG_BUF_SIZE,
						    &bp->hwrm_dbg_resp_dma_addr,
						    GFP_KERNEL);
	if (!bp->hwrm_dbg_resp_addr)
		netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");

	return 0;
}

static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
{
	if (bp->hwrm_short_cmd_req_addr) {
		struct pci_dev *pdev = bp->pdev;

		dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
				  bp->hwrm_short_cmd_req_addr,
				  bp->hwrm_short_cmd_req_dma_addr);
		bp->hwrm_short_cmd_req_addr = NULL;
	}
}

static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;

	bp->hwrm_short_cmd_req_addr =
		dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
				   &bp->hwrm_short_cmd_req_dma_addr,
				   GFP_KERNEL);
	if (!bp->hwrm_short_cmd_req_addr)
		return -ENOMEM;

	return 0;
}
static void bnxt_free_stats(struct bnxt *bp)
{
	u32 size, i;
	struct pci_dev *pdev = bp->pdev;

	if (bp->hw_rx_port_stats) {
		dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
				  bp->hw_rx_port_stats,
				  bp->hw_rx_port_stats_map);
		bp->hw_rx_port_stats = NULL;
		bp->flags &= ~BNXT_FLAG_PORT_STATS;
	}

	if (!bp->bnapi)
		return;

	size = sizeof(struct ctx_hw_stats);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats) {
			dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
					  cpr->hw_stats_map);
			cpr->hw_stats = NULL;
		}
	}
}

static int bnxt_alloc_stats(struct bnxt *bp)
{
	u32 size, i;
	struct pci_dev *pdev = bp->pdev;

	size = sizeof(struct ctx_hw_stats);

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
						   &cpr->hw_stats_map,
						   GFP_KERNEL);
		if (!cpr->hw_stats)
			return -ENOMEM;

		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
	}

	if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
		bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
					 sizeof(struct tx_port_stats) + 1024;

		bp->hw_rx_port_stats =
			dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
					   &bp->hw_rx_port_stats_map,
					   GFP_KERNEL);
		if (!bp->hw_rx_port_stats)
			return -ENOMEM;

		bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
				       512;
		bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
					   sizeof(struct rx_port_stats) + 512;
		bp->flags |= BNXT_FLAG_PORT_STATS;
	}
	return 0;
}
static void bnxt_clear_ring_indices(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->cp_raw_cons = 0;

		txr = bnapi->tx_ring;
		if (txr) {
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}

		rxr = bnapi->rx_ring;
		if (rxr) {
			rxr->rx_prod = 0;
			rxr->rx_agg_prod = 0;
			rxr->rx_sw_agg_prod = 0;
			rxr->rx_next_cons = 0;
		}
	}
}

static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
{
#ifdef CONFIG_RFS_ACCEL
	int i;

	/* Under rtnl_lock and all our NAPIs have been disabled. It's
	 * safe to delete the hash table.
	 */
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			hlist_del(&fltr->hash);
			kfree(fltr);
		}
	}
	if (irq_reinit) {
		kfree(bp->ntp_fltr_bmap);
		bp->ntp_fltr_bmap = NULL;
	}
	bp->ntp_fltr_count = 0;
#endif
}

static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	if (!(bp->flags & BNXT_FLAG_RFS))
		return 0;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);

	bp->ntp_fltr_count = 0;
	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
				    sizeof(long),
				    GFP_KERNEL);

	if (!bp->ntp_fltr_bmap)
		rc = -ENOMEM;

	return rc;
#else
	return 0;
#endif
}
static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
{
	bnxt_free_vnic_attributes(bp);
	bnxt_free_tx_rings(bp);
	bnxt_free_rx_rings(bp);
	bnxt_free_cp_rings(bp);
	bnxt_free_ntp_fltrs(bp, irq_re_init);
	if (irq_re_init) {
		bnxt_free_stats(bp);
		bnxt_free_ring_grps(bp);
		bnxt_free_vnics(bp);
		kfree(bp->tx_ring_map);
		bp->tx_ring_map = NULL;
		kfree(bp->tx_ring);
		bp->tx_ring = NULL;
		kfree(bp->rx_ring);
		bp->rx_ring = NULL;
		kfree(bp->bnapi);
		bp->bnapi = NULL;
	} else {
		bnxt_clear_ring_indices(bp);
	}
}

static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
{
	int i, j, rc, size, arr_size;
	void *bnapi;

	if (irq_re_init) {
		/* Allocate bnapi mem pointer array and mem block for
		 * all queues
		 */
		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
				bp->cp_nr_rings);
		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
		if (!bnapi)
			return -ENOMEM;

		bp->bnapi = bnapi;
		bnapi += arr_size;
		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
			bp->bnapi[i] = bnapi;
			bp->bnapi[i]->index = i;
			bp->bnapi[i]->bp = bp;
		}

		bp->rx_ring = kcalloc(bp->rx_nr_rings,
				      sizeof(struct bnxt_rx_ring_info),
				      GFP_KERNEL);
		if (!bp->rx_ring)
			return -ENOMEM;

		for (i = 0; i < bp->rx_nr_rings; i++) {
			bp->rx_ring[i].bnapi = bp->bnapi[i];
			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
		}

		bp->tx_ring = kcalloc(bp->tx_nr_rings,
				      sizeof(struct bnxt_tx_ring_info),
				      GFP_KERNEL);
		if (!bp->tx_ring)
			return -ENOMEM;

		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
					  GFP_KERNEL);

		if (!bp->tx_ring_map)
			return -ENOMEM;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			j = 0;
		else
			j = bp->rx_nr_rings;

		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
			bp->tx_ring[i].bnapi = bp->bnapi[j];
			bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
			if (i >= bp->tx_nr_rings_xdp) {
				bp->tx_ring[i].txq_index = i -
					bp->tx_nr_rings_xdp;
				bp->bnapi[j]->tx_int = bnxt_tx_int;
			} else {
				bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
				bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
			}
		}

		rc = bnxt_alloc_stats(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_ntp_fltrs(bp);
		if (rc)
			goto alloc_mem_err;

		rc = bnxt_alloc_vnics(bp);
		if (rc)
			goto alloc_mem_err;
	}

	bnxt_init_ring_struct(bp);

	rc = bnxt_alloc_rx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_tx_rings(bp);
	if (rc)
		goto alloc_mem_err;

	rc = bnxt_alloc_cp_rings(bp);
	if (rc)
		goto alloc_mem_err;

	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
				  BNXT_VNIC_UCAST_FLAG;
	rc = bnxt_alloc_vnic_attributes(bp);
	if (rc)
		goto alloc_mem_err;
	return 0;

alloc_mem_err:
	bnxt_free_mem(bp, true);
	return rc;
}
static void bnxt_disable_int(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		if (ring->fw_ring_id != INVALID_HW_RING_ID)
			BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
}

static void bnxt_disable_int_sync(struct bnxt *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);

	bnxt_disable_int(bp);
	for (i = 0; i < bp->cp_nr_rings; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void bnxt_enable_int(struct bnxt *bp)
{
	int i;

	atomic_set(&bp->intr_sem, 0);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
	}
}

void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
			    u16 cmpl_ring, u16 target_id)
{
	struct input *req = request;

	req->req_type = cpu_to_le16(req_type);
	req->cmpl_ring = cpu_to_le16(cmpl_ring);
	req->target_id = cpu_to_le16(target_id);
	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
				 int timeout, bool silent)
{
	int i, intr_process, rc, tmo_count;
	struct input *req = msg;
	u32 *data = msg;
	__le32 *resp_len, *valid;
	u16 cp_ring_id, len = 0;
	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
	struct hwrm_short_input short_input = {0};

	req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
	memset(resp, 0, PAGE_SIZE);
	cp_ring_id = le16_to_cpu(req->cmpl_ring);
	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memcpy(short_cmd_req, req, msg_len);
		memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
						   msg_len);

		short_input.req_type = req->req_type;
		short_input.signature =
				cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = cpu_to_le16(msg_len);
		short_input.req_addr =
			cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);

		data = (u32 *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	__iowrite32_copy(bp->bar0, data, msg_len / 4);

	for (i = msg_len; i < max_req_len; i += 4)
		writel(0, bp->bar0 + i);

	/* currently supports only one outstanding message */
	if (intr_process)
		bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);

	/* Ring channel doorbell */
	writel(1, bp->bar0 + 0x100);

	if (!timeout)
		timeout = DFLT_HWRM_CMD_TIMEOUT;

	i = 0;
	tmo_count = timeout * 40;
	if (intr_process) {
		/* Wait until hwrm response cmpl interrupt is processed */
		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
		       i++ < tmo_count) {
			usleep_range(25, 40);
		}

		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
			netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
				   le16_to_cpu(req->req_type));
			return -1;
		}
	} else {
		/* Check if response len is updated */
		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
		for (i = 0; i < tmo_count; i++) {
			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
			      HWRM_RESP_LEN_SFT;
			if (len)
				break;
			usleep_range(25, 40);
		}

		if (i >= tmo_count) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
				   timeout, le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len);
			return -1;
		}

		/* Last word of resp contains valid bit */
		valid = bp->hwrm_cmd_resp_addr + len - 4;
		for (i = 0; i < 5; i++) {
			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
				break;
			udelay(1);
		}

		if (i >= 5) {
			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
				   timeout, le16_to_cpu(req->req_type),
				   le16_to_cpu(req->seq_id), len, *valid);
			return -1;
		}
	}

	rc = le16_to_cpu(resp->error_code);
	if (rc && !silent)
		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
			   le16_to_cpu(resp->req_type),
			   le16_to_cpu(resp->seq_id), rc);
	return rc;
}
int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}

int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
			      int timeout)
{
	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
}

int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
			     int timeout)
{
	int rc;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
				     int bmap_size)
{
	struct hwrm_func_drv_rgtr_input req = {0};
	DECLARE_BITMAP(async_events_bmap, 256);
	u32 *events = (u32 *)async_events_bmap;
	int i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

	req.enables =
		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);

	memset(async_events_bmap, 0, sizeof(async_events_bmap));
	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
		__set_bit(bnxt_async_events_arr[i], async_events_bmap);

	if (bmap && bmap_size) {
		for (i = 0; i < bmap_size; i++) {
			if (test_bit(i, bmap))
				__set_bit(i, async_events_bmap);
		}
	}

	for (i = 0; i < 8; i++)
		req.async_event_fwd[i] |= cpu_to_le32(events[i]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);

	req.enables =
		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
			    FUNC_DRV_RGTR_REQ_ENABLES_VER);

	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
	req.ver_maj = DRV_VER_MAJ;
	req.ver_min = DRV_VER_MIN;
	req.ver_upd = DRV_VER_UPD;

	if (BNXT_PF(bp)) {
		u32 data[8];
		int i;

		memset(data, 0, sizeof(data));
		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
			u16 cmd = bnxt_vf_req_snif[i];
			unsigned int bit, idx;

			idx = cmd / 32;
			bit = cmd % 32;
			data[idx] |= 1 << bit;
		}

		for (i = 0; i < 8; i++)
			req.vf_req_fwd[i] = cpu_to_le32(data[i]);

		req.enables |=
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
	}

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_unrgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
	req.tunnel_type = tunnel_type;

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
		break;
	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
		break;
	default:
		break;
	}

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
			   rc);
	return rc;
}

static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
					   u8 tunnel_type)
{
	u32 rc = 0;
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
			   rc);
		goto err_out;
	}

	switch (tunnel_type) {
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
		break;
	default:
		break;
	}

err_out:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
{
	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);

	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
	req.mask = cpu_to_le32(vnic->rx_mask);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

#ifdef CONFIG_RFS_ACCEL
static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
					    struct bnxt_ntuple_filter *fltr)
{
	struct hwrm_cfa_ntuple_filter_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
	req.ntuple_filter_id = fltr->filter_id;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

#define BNXT_NTP_FLTR_FLAGS					\
	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)

#define BNXT_NTP_TUNNEL_FLTR_FLAG				\
		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
					     struct bnxt_ntuple_filter *fltr)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct flow_keys *keys = &fltr->fkeys;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];

	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);

	req.ethertype = htons(ETH_P_IP);
	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = keys->basic.ip_proto;

	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
		int i;

		req.ethertype = htons(ETH_P_IPV6);
		req.ip_addr_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
		*(struct in6_addr *)&req.src_ipaddr[0] =
			keys->addrs.v6addrs.src;
		*(struct in6_addr *)&req.dst_ipaddr[0] =
			keys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
			req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
		}
	} else {
		req.src_ipaddr[0] = keys->addrs.v4addrs.src;
		req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
		req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
		req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
	}
	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
		req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
		req.tunnel_type =
			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
	}

	req.src_port = keys->ports.src;
	req.src_port_mask = cpu_to_be16(0xffff);
	req.dst_port = keys->ports.dst;
	req.dst_port_mask = cpu_to_be16(0xffff);

	req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		fltr->filter_id = resp->ntuple_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
#endif
static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
				     u8 *mac_addr)
{
	u32 rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {0};
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		req.flags |=
			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
	req.enables =
		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
	req.l2_addr_mask[0] = 0xff;
	req.l2_addr_mask[1] = 0xff;
	req.l2_addr_mask[2] = 0xff;
	req.l2_addr_mask[3] = 0xff;
	req.l2_addr_mask[4] = 0xff;
	req.l2_addr_mask[5] = 0xff;

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
							resp->l2_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
{
	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
	int rc = 0;

	/* Any associated ntuple filters will also be cleared by firmware. */
	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_of_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < vnic->uc_filter_count; j++) {
			struct hwrm_cfa_l2_filter_free_input req = {0};

			bnxt_hwrm_cmd_hdr_init(bp, &req,
					       HWRM_CFA_L2_FILTER_FREE, -1, -1);

			req.l2_filter_id = vnic->fw_l2_filter_id[j];

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
		}
		vnic->uc_filter_count = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}
static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_tpa_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);

	if (tpa_flags) {
		u16 mss = bp->dev->mtu - 40;
		u32 nsegs, n, segs = 0, flags;

		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
		if (tpa_flags & BNXT_FLAG_GRO)
			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;

		req.flags = cpu_to_le32(flags);

		req.enables =
			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);

		/* Number of segs are log2 units, and first packet is not
		 * included as part of this units.
		 */
		if (mss <= BNXT_RX_PAGE_SIZE) {
			n = BNXT_RX_PAGE_SIZE / mss;
			nsegs = (MAX_SKB_FRAGS - 1) * n;
		} else {
			n = mss / BNXT_RX_PAGE_SIZE;
			if (mss & (BNXT_RX_PAGE_SIZE - 1))
				n++;
			nsegs = (MAX_SKB_FRAGS - n) / n;
		}

		segs = ilog2(nsegs);
		req.max_agg_segs = cpu_to_le16(segs);
		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);

		req.min_agg_len = cpu_to_le32(512);
	}
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
{
	u32 i, j, max_rings;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_rss_cfg_input req = {0};

	if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
	if (set_rss) {
		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
		if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
			if (BNXT_CHIP_TYPE_NITRO_A0(bp))
				max_rings = bp->rx_nr_rings - 1;
			else
				max_rings = bp->rx_nr_rings;
		} else {
			max_rings = 1;
		}

		/* Fill the RSS indirection table with ring group ids */
		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
			if (j == max_rings)
				j = 0;
			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
		}

		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
		req.hash_key_tbl_addr =
			cpu_to_le64(vnic->rss_hash_key_dma_addr);
	}
	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_plcmodes_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
	req.enables =
		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
	/* thresholds not implemented in firmware yet */
	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
					u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
	req.rss_cos_lb_ctx_id =
		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);

	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}

static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
		}
	}
	bp->rsscos_nr_ctxs = 0;
}

static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
{
	int rc;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
			       -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
	unsigned int ring = 0, grp_idx;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_cfg_input req = {0};
	u16 def_vlan = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);

	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS support for now TBD: COS & LB */
	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
		req.rss_rule =
			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
	} else {
		req.rss_rule = cpu_to_le16(0xffff);
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
	} else {
		req.cos_rule = cpu_to_le16(0xffff);
	}

	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
		ring = 0;
	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
		ring = vnic_id - 1;
	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
		ring = bp->rx_nr_rings - 1;

	grp_idx = bp->rx_ring[ring].bnapi->index;
	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);

	req.lb_rule = cpu_to_le16(0xffff);
	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
			      VLAN_HLEN);

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp))
		def_vlan = bp->vf.vlan;
#endif
	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
		req.flags |=
			cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
	u32 rc = 0;

	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
		req.vnic_id =
			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);

		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return rc;
		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
	}
	return rc;
}

static void bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_free_one(bp, i);
}

static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
				unsigned int start_rx_ring_idx,
				unsigned int nr_rings)
{
	int rc = 0;
	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
	struct hwrm_vnic_alloc_input req = {0};
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
		grp_idx = bp->rx_ring[i].bnapi->index;
		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
				   j, nr_rings);
			break;
		}
		bp->vnic_info[vnic_id].fw_grp_ids[j] =
					bp->grp_info[grp_idx].fw_grp_id;
	}

	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
	if (vnic_id == 0)
		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_vnic_qcaps_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10600)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		if (resp->flags &
		    cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
	u16 i;
	u32 rc = 0;

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct hwrm_ring_grp_alloc_input req = {0};
		struct hwrm_ring_grp_alloc_output *resp =
					bp->hwrm_cmd_resp_addr;
		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);

		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		bp->grp_info[grp_idx].fw_grp_id =
			le32_to_cpu(resp->ring_group_id);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
	u16 i;
	u32 rc = 0;
	struct hwrm_ring_grp_free_input req = {0};

	if (!bp->grp_info)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
			continue;
		req.ring_group_id =
			cpu_to_le32(bp->grp_info[i].fw_grp_id);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
				    struct bnxt_ring_struct *ring,
				    u32 ring_type, u32 map_index,
				    u32 stats_ctx_id)
{
	int rc = 0, err = 0;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	u16 ring_id;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);

	if (ring->nr_pages > 1) {
		req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	} else {
		req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
	}
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		req.cmpl_ring_id =
			cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
		req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
		req.queue_id = cpu_to_le16(ring->queue_id);
		break;
	case HWRM_RING_ALLOC_RX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_AGG:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_USING_MSIX)
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
			   ring_type);
		return -1;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	err = le16_to_cpu(resp->error_code);
	ring_id = le16_to_cpu(resp->ring_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || err) {
		switch (ring_type) {
		case RING_FREE_REQ_RING_TYPE_L2_CMPL:
			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		case RING_FREE_REQ_RING_TYPE_RX:
			netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		case RING_FREE_REQ_RING_TYPE_TX:
			netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
				   rc, err);
			return -1;

		default:
			netdev_err(bp->dev, "Invalid ring\n");
			return -1;
		}
	}
	ring->fw_ring_id = ring_id;
	return rc;
}
static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
{
	int rc;

	if (BNXT_PF(bp)) {
		struct hwrm_func_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
		req.fid = cpu_to_le16(0xffff);
		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	} else {
		struct hwrm_func_vf_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
		req.enables =
			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	}
	return rc;
}
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
	int i, rc = 0;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		cpr->cp_doorbell = bp->bar1 + i * 0x80;
		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
					      INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;

		if (!i) {
			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
			if (rc)
				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
		}
	}

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 map_idx = txr->bnapi->index;
		u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
					      map_idx, fw_stats_ctx);
		if (rc)
			goto err_out;
		txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 map_idx = rxr->bnapi->index;

		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
					      map_idx, INVALID_STATS_CTX_ID);
		if (rc)
			goto err_out;
		rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
			struct bnxt_ring_struct *ring =
						&rxr->rx_agg_ring_struct;
			u32 grp_idx = rxr->bnapi->index;
			u32 map_idx = grp_idx + bp->rx_nr_rings;

			rc = hwrm_ring_alloc_send_msg(bp, ring,
						      HWRM_RING_ALLOC_AGG,
						      map_idx,
						      INVALID_STATS_CTX_ID);
			if (rc)
				goto err_out;

			rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
			writel(DB_KEY_RX | rxr->rx_agg_prod,
			       rxr->rx_agg_doorbell);
			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
		}
	}
err_out:
	return rc;
}
static int hwrm_ring_free_send_msg(struct bnxt *bp,
				   struct bnxt_ring_struct *ring,
				   u32 ring_type, int cmpl_ring_id)
{
	int rc;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
	u16 error_code;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
	req.ring_type = ring_type;
	req.ring_id = cpu_to_le16(ring->fw_ring_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	error_code = le16_to_cpu(resp->error_code);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || error_code) {
		switch (ring_type) {
		case RING_FREE_REQ_RING_TYPE_L2_CMPL:
			netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
				   rc);
			return rc;
		case RING_FREE_REQ_RING_TYPE_RX:
			netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
				   rc);
			return rc;
		case RING_FREE_REQ_RING_TYPE_TX:
			netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
				   rc);
			return rc;
		default:
			netdev_err(bp->dev, "Invalid ring\n");
			return -1;
		}
	}
	return 0;
}
4429 static void bnxt_hwrm_ring_free(struct bnxt
*bp
, bool close_path
)
4436 for (i
= 0; i
< bp
->tx_nr_rings
; i
++) {
4437 struct bnxt_tx_ring_info
*txr
= &bp
->tx_ring
[i
];
4438 struct bnxt_ring_struct
*ring
= &txr
->tx_ring_struct
;
4439 u32 grp_idx
= txr
->bnapi
->index
;
4440 u32 cmpl_ring_id
= bp
->grp_info
[grp_idx
].cp_fw_ring_id
;
4442 if (ring
->fw_ring_id
!= INVALID_HW_RING_ID
) {
4443 hwrm_ring_free_send_msg(bp
, ring
,
4444 RING_FREE_REQ_RING_TYPE_TX
,
4445 close_path
? cmpl_ring_id
:
4446 INVALID_HW_RING_ID
);
4447 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
4451 for (i
= 0; i
< bp
->rx_nr_rings
; i
++) {
4452 struct bnxt_rx_ring_info
*rxr
= &bp
->rx_ring
[i
];
4453 struct bnxt_ring_struct
*ring
= &rxr
->rx_ring_struct
;
4454 u32 grp_idx
= rxr
->bnapi
->index
;
4455 u32 cmpl_ring_id
= bp
->grp_info
[grp_idx
].cp_fw_ring_id
;
4457 if (ring
->fw_ring_id
!= INVALID_HW_RING_ID
) {
4458 hwrm_ring_free_send_msg(bp
, ring
,
4459 RING_FREE_REQ_RING_TYPE_RX
,
4460 close_path
? cmpl_ring_id
:
4461 INVALID_HW_RING_ID
);
4462 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
4463 bp
->grp_info
[grp_idx
].rx_fw_ring_id
=
4468 for (i
= 0; i
< bp
->rx_nr_rings
; i
++) {
4469 struct bnxt_rx_ring_info
*rxr
= &bp
->rx_ring
[i
];
4470 struct bnxt_ring_struct
*ring
= &rxr
->rx_agg_ring_struct
;
4471 u32 grp_idx
= rxr
->bnapi
->index
;
4472 u32 cmpl_ring_id
= bp
->grp_info
[grp_idx
].cp_fw_ring_id
;
4474 if (ring
->fw_ring_id
!= INVALID_HW_RING_ID
) {
4475 hwrm_ring_free_send_msg(bp
, ring
,
4476 RING_FREE_REQ_RING_TYPE_RX
,
4477 close_path
? cmpl_ring_id
:
4478 INVALID_HW_RING_ID
);
4479 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
4480 bp
->grp_info
[grp_idx
].agg_fw_ring_id
=
4485 /* The completion rings are about to be freed. After that the
4486 * IRQ doorbell will not work anymore. So we need to disable
4489 bnxt_disable_int_sync(bp
);
4491 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
4492 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
4493 struct bnxt_cp_ring_info
*cpr
= &bnapi
->cp_ring
;
4494 struct bnxt_ring_struct
*ring
= &cpr
->cp_ring_struct
;
4496 if (ring
->fw_ring_id
!= INVALID_HW_RING_ID
) {
4497 hwrm_ring_free_send_msg(bp
, ring
,
4498 RING_FREE_REQ_RING_TYPE_L2_CMPL
,
4499 INVALID_HW_RING_ID
);
4500 ring
->fw_ring_id
= INVALID_HW_RING_ID
;
4501 bp
->grp_info
[i
].cp_fw_ring_id
= INVALID_HW_RING_ID
;
static int bnxt_hwrm_get_rings(struct bnxt *bp)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return -EIO;
	}

	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
	if (bp->flags & BNXT_FLAG_NEW_RM) {
		u16 cp, stats;

		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
		hw_resc->resv_hw_ring_grps =
			le32_to_cpu(resp->alloc_hw_ring_grps);
		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
		cp = le16_to_cpu(resp->alloc_cmpl_rings);
		stats = le16_to_cpu(resp->alloc_stat_ctx);
		cp = min_t(u16, cp, stats);
		hw_resc->resv_cp_rings = cp;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(fid);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);

	return rc;
}

static void
__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
			     int tx_rings, int rx_rings, int ring_grps,
			     int cp_rings, int vnics)
{
	u32 enables = 0;

	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
	req->fid = cpu_to_le16(0xffff);
	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	req->num_tx_rings = cpu_to_le16(tx_rings);
	if (bp->flags & BNXT_FLAG_NEW_RM) {
		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
		enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				      FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
		enables |= ring_grps ?
			   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
		enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;

		req->num_rx_rings = cpu_to_le16(rx_rings);
		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
		req->num_cmpl_rings = cpu_to_le16(cp_rings);
		req->num_stat_ctxs = req->num_cmpl_rings;
		req->num_vnics = cpu_to_le16(vnics);
	}
	req->enables = cpu_to_le32(enables);
}

static void
__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
			     struct hwrm_func_vf_cfg_input *req, int tx_rings,
			     int rx_rings, int ring_grps, int cp_rings,
			     int vnics)
{
	u32 enables = 0;

	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
	enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
			      FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
	enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;

	req->num_tx_rings = cpu_to_le16(tx_rings);
	req->num_rx_rings = cpu_to_le16(rx_rings);
	req->num_hw_ring_grps = cpu_to_le16(ring_grps);
	req->num_cmpl_rings = cpu_to_le16(cp_rings);
	req->num_stat_ctxs = req->num_cmpl_rings;
	req->num_vnics = cpu_to_le16(vnics);

	req->enables = cpu_to_le32(enables);
}

static int
bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
			   int ring_grps, int cp_rings, int vnics)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	if (!req.enables)
		return 0;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;

	if (bp->hwrm_spec_code < 0x10601)
		bp->hw_resc.resv_tx_rings = tx_rings;

	rc = bnxt_hwrm_get_rings(bp);
	return rc;
}

static int
bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
			   int ring_grps, int cp_rings, int vnics)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
		bp->hw_resc.resv_tx_rings = tx_rings;
		return 0;
	}

	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;

	rc = bnxt_hwrm_get_rings(bp);
	return rc;
}

static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
				   int cp, int vnic)
{
	if (BNXT_PF(bp))
		return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
	else
		return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
}

static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared);
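/* Reconcile the driver's desired ring counts with what the firmware will
 * actually grant.  RX is doubled up front when aggregation rings are in
 * use (each RX ring then needs a second hardware ring), the reservation
 * is only sent if something changed, and the bp->*_nr_rings fields are
 * trimmed afterwards to the granted amounts.
 */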
static int __bnxt_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx = bp->tx_nr_rings;
	int rx = bp->rx_nr_rings;
	int cp = bp->cp_nr_rings;
	int grp, rx_rings, rc;
	bool sh = false;
	int vnic = 1;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;
	if (bp->flags & BNXT_FLAG_RFS)
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;

	grp = bp->rx_nr_rings;
	if (tx == hw_resc->resv_tx_rings &&
	    (!(bp->flags & BNXT_FLAG_NEW_RM) ||
	      (rx == hw_resc->resv_rx_rings &&
	       grp == hw_resc->resv_hw_ring_grps &&
	       cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
		return 0;

	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
	if (rc)
		return rc;

	tx = hw_resc->resv_tx_rings;
	if (bp->flags & BNXT_FLAG_NEW_RM) {
		rx = hw_resc->resv_rx_rings;
		cp = hw_resc->resv_cp_rings;
		grp = hw_resc->resv_hw_ring_grps;
		vnic = hw_resc->resv_vnics;
	}

	rx_rings = rx;
	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		if (rx >= 2) {
			rx_rings = rx >> 1;
		} else {
			if (netif_running(bp->dev))
				return -ENOMEM;

			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features &= ~NETIF_F_LRO;
			bp->dev->features &= ~NETIF_F_LRO;
			bnxt_set_ring_params(bp);
		}
	}
	rx_rings = min_t(int, rx_rings, grp);
	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx = rx_rings << 1;
	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
	bp->tx_nr_rings = tx;
	bp->rx_nr_rings = rx_rings;
	bp->cp_nr_rings = cp;

	if (!tx || !rx || !cp || !grp || !vnic)
		return -ENOMEM;

	return rc;
}

static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int rx = bp->rx_nr_rings;
	int vnic = 1;

	if (bp->hwrm_spec_code < 0x10601)
		return false;

	if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
		return true;

	if (bp->flags & BNXT_FLAG_RFS)
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
	    (hw_resc->resv_rx_rings != rx ||
	     hw_resc->resv_cp_rings != bp->cp_nr_rings ||
	     hw_resc->resv_vnics != vnic))
		return true;
	return false;
}

static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				    int ring_grps, int cp_rings, int vnics)
{
	struct hwrm_func_vf_cfg_input req = {0};
	u32 flags;
	int rc;

	if (!(bp->flags & BNXT_FLAG_NEW_RM))
		return 0;

	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;

	req.flags = cpu_to_le32(flags);
	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;
	return 0;
}

static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				    int ring_grps, int cp_rings, int vnics)
{
	struct hwrm_func_cfg_input req = {0};
	u32 flags;
	int rc;

	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
	if (bp->flags & BNXT_FLAG_NEW_RM)
		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;

	req.flags = cpu_to_le32(flags);
	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;
	return 0;
}

static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				 int ring_grps, int cp_rings, int vnics)
{
	if (bp->hwrm_spec_code < 0x10801)
		return 0;

	if (BNXT_PF(bp))
		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
						ring_grps, cp_rings, vnics);

	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
					cp_rings, vnics);
}
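/* Convert the generic bnxt_coal parameters into the firmware's aggint
 * parameter format.  The DMA aggregation counts are 6-bit fields and must
 * stay in [1, 63], and the min/buf timers are derived from the interrupt
 * timer as 1/2 and 1/4 of its value, never zero.
 */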
static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	u16 val, tmr, max, flags;

	max = hw_coal->bufs_per_record * 128;
	if (hw_coal->budget)
		max = hw_coal->bufs_per_record * hw_coal->budget;

	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
	req->num_cmpl_aggr_int = cpu_to_le16(val);

	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQ */
	val = min_t(u16, val, 63);
	req->num_cmpl_dma_aggr = cpu_to_le16(val);

	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQ */
	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 63);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);

	tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks);
	tmr = max_t(u16, tmr, 1);
	req->int_lat_tmr_max = cpu_to_le16(tmr);

	/* min timer set to 1/2 of interrupt timer */
	val = max_t(u16, tmr / 2, 1);
	req->int_lat_tmr_min = cpu_to_le16(val);

	/* buf timer set to 1/4 of interrupt timer */
	val = max_t(u16, tmr / 4, 1);
	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);

	tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq);
	tmr = max_t(u16, tmr, 1);
	req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);

	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
	req->flags = cpu_to_le16(flags);
}

int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal coal;
	unsigned int grp_idx;

	/* Tick values in microseconds.
	 * 1 coal_buf x bufs_per_record = 1 completion record.
	 */
	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));

	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;

	if (!bnapi->rx_ring)
		return -ENODEV;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	bnxt_hwrm_set_coal_params(&coal, &req_rx);

	grp_idx = bnapi->index;
	req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);

	return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
				 HWRM_CMD_TIMEOUT);
}

int bnxt_hwrm_set_coal(struct bnxt *bp)
{
	int i, rc = 0;
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
							   req_tx = {0}, *req;

	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);

	bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx);
	bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		req = &req_rx;
		if (!bnapi->rx_ring)
			req = &req_tx;
		req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);

		rc = _hwrm_send_message(bp, req, sizeof(*req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
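/* Each completion ring owns one firmware statistics context; the chip
 * DMAs counters into cpr->hw_stats_map every update_period_ms.  The two
 * helpers below free and allocate those contexts for all completion
 * rings under hwrm_cmd_lock.
 */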
static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_free_input req = {0};

	if (!bp->bnapi)
		return 0;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);

			rc = _hwrm_send_message(bp, &req, sizeof(req),
						HWRM_CMD_TIMEOUT);
			if (rc)
				break;

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);

	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u16 flags;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	}
#endif
	flags = le16_to_cpu(resp->flags);
	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
		bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
			bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
	}
	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;

	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}
	if (bp->hwrm_spec_code < 0x10707 ||
	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
		bp->br_mode = BRIDGE_MODE_VEB;
	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
		bp->br_mode = BRIDGE_MODE_VEPA;
	else
		bp->br_mode = BRIDGE_MODE_UNDEF;

	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
	if (!bp->max_mtu)
		bp->max_mtu = BNXT_MAX_MTU;

func_qcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		rc = -EIO;
		goto hwrm_func_resc_qcaps_exit;
	}

	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->vf_resv_strategy =
			le16_to_cpu(resp->vf_reservation_strategy);
		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
	}
hwrm_func_resc_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u32 flags;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
		bp->flags |= BNXT_FLAG_ROCEV2_CAP;

	bp->tx_push_thresh = 0;
	if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
	if (!hw_resc->max_hw_ring_grps)
		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = le16_to_cpu(resp->fid);
		pf->port_id = le16_to_cpu(resp->port_id);
		bp->dev->dev_port = pf->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
		pf->max_vfs = le16_to_cpu(resp->max_vfs);
		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
			bp->flags |= BNXT_FLAG_WOL_CAP;
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = le16_to_cpu(resp->fid);
		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
	}

hwrm_func_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc;

	rc = __bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_hwrm_func_resc_qcaps(bp);
		if (!rc)
			bp->flags |= BNXT_FLAG_NEW_RM;
	}
	return 0;
}

static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	struct hwrm_func_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}

static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {0};
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 i, *qptr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = -EINVAL;
		goto qportcfg_exit;
	}
	bp->max_tc = resp->max_configurable_queues;
	bp->max_lltc = resp->max_configurable_lossless_queues;
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;

	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
		bp->max_tc = 1;

	if (bp->max_lltc > bp->max_tc)
		bp->max_lltc = bp->max_tc;

	qptr = &resp->queue_id0;
	for (i = 0; i < bp->max_tc; i++) {
		bp->q_info[i].queue_id = *qptr++;
		bp->q_info[i].queue_profile = *qptr++;
	}

qportcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
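/* First handshake with the firmware: report the driver's HWRM version
 * and capture the firmware's spec code, default command timeout, max
 * request length, chip number, and whether the short command format is
 * required.
 */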
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc;
	struct hwrm_ver_get_input req = {0};
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	u32 dev_caps_cfg;

	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_ver_get_exit;

	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
			     resp->hwrm_intf_min_8b << 8 |
			     resp->hwrm_intf_upd_8b;
	if (resp->hwrm_intf_maj_8b < 1) {
		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
			    resp->hwrm_intf_upd_8b);
		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
	}
	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
		 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
		 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);

	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

	if (resp->hwrm_intf_maj_8b >= 1)
		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);

	bp->chip_num = le16_to_cpu(resp->chip_num);
	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
	    !resp->chip_metal)
		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;

	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		bp->flags |= BNXT_FLAG_SHORT_CMD;

hwrm_ver_get_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
int bnxt_hwrm_fw_set_time(struct bnxt *bp)
{
	struct hwrm_fw_set_time_input req = {0};
	struct tm tm;
	time64_t now = ktime_get_real_seconds();

	if (bp->hwrm_spec_code < 0x10400)
		return -EOPNOTSUPP;

	time64_to_tm(now, 0, &tm);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
	req.year = cpu_to_le16(1900 + tm.tm_year);
	req.month = 1 + tm.tm_mon;
	req.day = tm.tm_mday;
	req.hour = tm.tm_hour;
	req.minute = tm.tm_min;
	req.second = tm.tm_sec;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	int rc;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_port_qstats_input req = {0};

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}

static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	bp->vxlan_port_cnt = 0;
	if (bp->nge_port_cnt) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
	}
	bp->nge_port_cnt = 0;
}

static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
{
	int rc, i;
	u32 tpa_flags = 0;

	if (set_tpa)
		tpa_flags = bp->flags & BNXT_FLAG_TPA;
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   i, rc);
			return rc;
		}
	}
	return 0;
}

static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_set_rss(bp, i, false);
}
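/* Teardown mirrors bnxt_init_chip() in reverse: filters, RSS and TPA are
 * undone before the VNICs are freed, then rings and ring groups.  The
 * statistics contexts and tunnel ports are only released when interrupts
 * are being re-initialized.
 */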
static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
				    bool irq_re_init)
{
	if (bp->vnic_info) {
		bnxt_hwrm_clear_vnic_filter(bp);
		/* clear all RSS settings before freeing the vnic ctx */
		bnxt_hwrm_clear_vnic_rss(bp);
		bnxt_hwrm_vnic_ctx_free(bp);
		/* before freeing the vnic, undo the vnic tpa settings */
		if (bp->flags & BNXT_FLAG_TPA)
			bnxt_set_tpa(bp, false);
		bnxt_hwrm_vnic_free(bp);
	}
	bnxt_hwrm_ring_free(bp, close_path);
	bnxt_hwrm_ring_grp_free(bp);
	if (irq_re_init) {
		bnxt_hwrm_stat_ctx_free(bp);
		bnxt_hwrm_free_tunnel_ports(bp);
	}
}

static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
	if (br_mode == BRIDGE_MODE_VEB)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
	else if (br_mode == BRIDGE_MODE_VEPA)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
	else
		return -EINVAL;
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;
	return rc;
}

static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
	req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
	if (size == 128)
		req.cache_linesize =
			FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		rc = -EIO;
	return rc;
}

static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	int rc;

	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
		goto skip_rss_ctx;

	/* allocate context for vnic */
	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}
	bp->rsscos_nr_ctxs++;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
				   vnic_id, rc);
			goto vnic_setup_err;
		}
		bp->rsscos_nr_ctxs++;
	}

skip_rss_ctx:
	/* configure default vnic, ring grp */
	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	/* Enable RSS hashing on vnic */
	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
			   vnic_id, rc);
		goto vnic_setup_err;
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
		if (rc)
			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
				   vnic_id, rc);
	}

vnic_setup_err:
	return rc;
}

static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
	int i, rc = 0;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_vnic_info *vnic;
		u16 vnic_id = i + 1;
		u16 ring_id = i;

		if (vnic_id >= bp->nr_vnics)
			break;

		vnic = &bp->vnic_info[vnic_id];
		vnic->flags |= BNXT_VNIC_RFS_FLAG;
		if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
				   vnic_id, rc);
			break;
		}
		rc = bnxt_setup_vnic(bp, vnic_id);
		if (rc)
			break;
	}
	return rc;
#else
	return 0;
#endif
}

/* Allow PF and VF with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp) && !bp->vf.vlan)
		return false;
#endif
	return true;
}

static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
	unsigned int rc = 0;

	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}

	rc = bnxt_hwrm_vnic_cfg(bp, 1);
	if (rc) {
		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
			   rc);
		return rc;
	}
	return rc;
}
static int bnxt_cfg_rx_mode(struct bnxt *);
static bool bnxt_mc_list_updated(struct bnxt *, u32 *);

static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int rc = 0;
	unsigned int rx_nr_rings = bp->rx_nr_rings;

	if (irq_re_init) {
		rc = bnxt_hwrm_stat_ctx_alloc(bp);
		if (rc) {
			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
				   rc);
			goto err_out;
		}
	}

	rc = bnxt_hwrm_ring_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_hwrm_ring_grp_alloc(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
		goto err_out;
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		rx_nr_rings--;

	/* default vnic 0 */
	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
	if (rc) {
		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
		goto err_out;
	}

	rc = bnxt_setup_vnic(bp, 0);
	if (rc)
		goto err_out;

	if (bp->flags & BNXT_FLAG_RFS) {
		rc = bnxt_alloc_rfs_vnics(bp);
		if (rc)
			goto err_out;
	}

	if (bp->flags & BNXT_FLAG_TPA) {
		rc = bnxt_set_tpa(bp, true);
		if (rc)
			goto err_out;
	}

	if (BNXT_VF(bp))
		bnxt_update_vf_mac(bp);

	/* Filter for default vnic 0 */
	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
	if (rc) {
		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
		goto err_out;
	}
	vnic->uc_filter_count = 1;

	vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;

	if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (bp->dev->flags & IFF_ALLMULTI) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else {
		u32 mask = 0;

		bnxt_mc_list_updated(bp, &mask);
		vnic->rx_mask |= mask;
	}

	rc = bnxt_cfg_rx_mode(bp);
	if (rc)
		goto err_out;

	rc = bnxt_hwrm_set_coal(bp);
	if (rc)
		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
			    rc);

	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		rc = bnxt_setup_nitroa0_vnic(bp);
		if (rc)
			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
				   rc);
	}

	if (BNXT_VF(bp)) {
		bnxt_hwrm_func_qcfg(bp);
		netdev_update_features(bp->dev);
	}

	return 0;

err_out:
	bnxt_hwrm_resource_free(bp, 0, true);

	return rc;
}

static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
	return 0;
}

static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
{
	bnxt_init_cp_rings(bp);
	bnxt_init_rx_rings(bp);
	bnxt_init_tx_rings(bp);
	bnxt_init_ring_grps(bp, irq_re_init);
	bnxt_init_vnics(bp);

	return bnxt_init_chip(bp, irq_re_init);
}

static int bnxt_set_real_num_queues(struct bnxt *bp)
{
	int rc;
	struct net_device *dev = bp->dev;

	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
					  bp->tx_nr_rings_xdp);
	if (rc)
		return rc;

	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
	if (rc)
		return rc;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
#endif

	return rc;
}
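/* Fit *rx and *tx into at most "max" hardware rings.  When completion
 * rings are shared, an RX and a TX ring pair share one ring, so both
 * counts are simply capped at max; otherwise rings are taken from the
 * larger of the two counts until rx + tx fits.
 */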
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared)
{
	int _rx = *rx, _tx = *tx;

	if (shared) {
		*rx = min_t(int, _rx, max);
		*tx = min_t(int, _tx, max);
	} else {
		if (max < 2)
			return -ENOMEM;

		while (_rx + _tx > max) {
			if (_rx > _tx && _rx > 1)
				_rx--;
			else if (_tx > 1)
				_tx--;
		}
		*rx = _rx;
		*tx = _tx;
	}
	return 0;
}

static void bnxt_setup_msix(struct bnxt *bp)
{
	const int len = sizeof(bp->irq_tbl[0].name);
	struct net_device *dev = bp->dev;
	int tcs, i;

	tcs = netdev_get_num_tc(dev);
	if (tcs > 1) {
		int i, off, count;

		for (i = 0; i < tcs; i++) {
			count = bp->tx_nr_rings_per_tc;
			off = count * i;
			netdev_set_tc_queue(dev, i, count, off);
		}
	}

	for (i = 0; i < bp->cp_nr_rings; i++) {
		char *attr;

		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
			attr = "TxRx";
		else if (i < bp->rx_nr_rings)
			attr = "rx";
		else
			attr = "tx";

		snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
			 i);
		bp->irq_tbl[i].handler = bnxt_msix;
	}
}

static void bnxt_setup_inta(struct bnxt *bp)
{
	const int len = sizeof(bp->irq_tbl[0].name);

	if (netdev_get_num_tc(bp->dev))
		netdev_reset_tc(bp->dev);

	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
		 0);
	bp->irq_tbl[0].handler = bnxt_inta;
}

static int bnxt_setup_int_mode(struct bnxt *bp)
{
	int rc;

	if (bp->flags & BNXT_FLAG_USING_MSIX)
		bnxt_setup_msix(bp);
	else
		bnxt_setup_inta(bp);

	rc = bnxt_set_real_num_queues(bp);
	return rc;
}
#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
	return bp->hw_resc.max_rsscos_ctxs;
}

static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
	return bp->hw_resc.max_vnics;
}
#endif

unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
	return bp->hw_resc.max_stat_ctxs;
}

void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
{
	bp->hw_resc.max_stat_ctxs = max;
}

unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
	return bp->hw_resc.max_cp_rings;
}

void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
{
	bp->hw_resc.max_cp_rings = max;
}

static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;

	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}

void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
	bp->hw_resc.max_irqs = max_irqs;
}

static int bnxt_init_msix(struct bnxt *bp)
{
	int i, total_vecs, rc = 0, min = 1;
	struct msix_entry *msix_ent;

	total_vecs = bnxt_get_max_func_irqs(bp);
	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
	if (!msix_ent)
		return -ENOMEM;

	for (i = 0; i < total_vecs; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
		min = 2;

	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
	if (total_vecs < 0) {
		rc = -ENODEV;
		goto msix_setup_exit;
	}

	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
	if (bp->irq_tbl) {
		for (i = 0; i < total_vecs; i++)
			bp->irq_tbl[i].vector = msix_ent[i].vector;

		bp->total_irqs = total_vecs;
		/* Trim rings based upon num of vectors allocated */
		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
				     total_vecs, min == 1);
		if (rc)
			goto msix_setup_exit;

		bp->cp_nr_rings = (min == 1) ?
				  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
				  bp->tx_nr_rings + bp->rx_nr_rings;

	} else {
		rc = -ENOMEM;
		goto msix_setup_exit;
	}
	bp->flags |= BNXT_FLAG_USING_MSIX;
	kfree(msix_ent);
	return 0;

msix_setup_exit:
	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
	kfree(bp->irq_tbl);
	bp->irq_tbl = NULL;
	pci_disable_msix(bp->pdev);
	kfree(msix_ent);
	return rc;
}

static int bnxt_init_inta(struct bnxt *bp)
{
	bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
	if (!bp->irq_tbl)
		return -ENOMEM;

	bp->total_irqs = 1;
	bp->rx_nr_rings = 1;
	bp->tx_nr_rings = 1;
	bp->cp_nr_rings = 1;
	bp->flags |= BNXT_FLAG_SHARED_RINGS;
	bp->irq_tbl[0].vector = bp->pdev->irq;
	return 0;
}

static int bnxt_init_int_mode(struct bnxt *bp)
{
	int rc = 0;

	if (bp->flags & BNXT_FLAG_MSIX_CAP)
		rc = bnxt_init_msix(bp);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
		/* fallback to INTA */
		rc = bnxt_init_inta(bp);
	}
	return rc;
}

static void bnxt_clear_int_mode(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	kfree(bp->irq_tbl);
	bp->irq_tbl = NULL;
	bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
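/* Called before open: re-reserve rings if the configuration changed.  If
 * the firmware grants more completion rings than the current MSI-X
 * allocation covers, interrupt mode is torn down and set up again.
 */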
static int bnxt_reserve_rings(struct bnxt *bp)
{
	int orig_cp = bp->hw_resc.resv_cp_rings;
	int tcs = netdev_get_num_tc(bp->dev);
	int rc;

	if (!bnxt_need_reserve_rings(bp))
		return 0;

	rc = __bnxt_reserve_rings(bp);
	if (rc) {
		netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
		return rc;
	}
	if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
		bnxt_clear_int_mode(bp);
		rc = bnxt_init_int_mode(bp);
		if (rc)
			return rc;
	}
	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
		netdev_err(bp->dev, "tx ring reservation failure\n");
		netdev_reset_tc(bp->dev);
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
		return -ENOMEM;
	}
	bp->num_stat_ctxs = bp->cp_nr_rings;
	return 0;
}

static void bnxt_free_irq(struct bnxt *bp)
{
	struct bnxt_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
	bp->dev->rx_cpu_rmap = NULL;
#endif
	if (!bp->irq_tbl)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested) {
			if (irq->have_cpumask) {
				irq_set_affinity_hint(irq->vector, NULL);
				free_cpumask_var(irq->cpu_mask);
				irq->have_cpumask = 0;
			}
			free_irq(irq->vector, bp->bnapi[i]);
		}

		irq->requested = 0;
	}
}
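/* Request one interrupt per completion ring (IRQF_SHARED when running in
 * INTA mode).  RX vectors are registered with the aRFS CPU rmap, and an
 * affinity hint spreads the vectors across CPUs local to the device's
 * NUMA node.
 */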
static int bnxt_request_irq(struct bnxt *bp)
{
	int i, j, rc = 0;
	unsigned long flags = 0;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
#endif

	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
		flags = IRQF_SHARED;

	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_irq *irq = &bp->irq_tbl[i];
#ifdef CONFIG_RFS_ACCEL
		if (rmap && bp->bnapi[i]->rx_ring) {
			rc = irq_cpu_rmap_add(rmap, irq->vector);
			if (rc)
				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
					    j);
			j++;
		}
#endif
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 bp->bnapi[i]);
		if (rc)
			break;

		irq->requested = 1;

		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
			int numa_node = dev_to_node(&bp->pdev->dev);

			irq->have_cpumask = 1;
			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
					irq->cpu_mask);
			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
			if (rc) {
				netdev_warn(bp->dev,
					    "Set affinity failed, IRQ = %d\n",
					    irq->vector);
				break;
			}
		}
	}
	return rc;
}

static void bnxt_del_napi(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];

		napi_hash_del(&bnapi->napi);
		netif_napi_del(&bnapi->napi);
	}
	/* We called napi_hash_del() before netif_napi_del(), we need
	 * to respect an RCU grace period before freeing napi structures.
	 */
	synchronize_net();
}

static void bnxt_init_napi(struct bnxt *bp)
{
	int i;
	unsigned int cp_nr_rings = bp->cp_nr_rings;
	struct bnxt_napi *bnapi;

	if (bp->flags & BNXT_FLAG_USING_MSIX) {
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			cp_nr_rings--;
		for (i = 0; i < cp_nr_rings; i++) {
			bnapi = bp->bnapi[i];
			netif_napi_add(bp->dev, &bnapi->napi,
				       bnxt_poll, 64);
		}
		if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
			bnapi = bp->bnapi[cp_nr_rings];
			netif_napi_add(bp->dev, &bnapi->napi,
				       bnxt_poll_nitroa0, 64);
		}
	} else {
		bnapi = bp->bnapi[0];
		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
	}
}

static void bnxt_disable_napi(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;

		if (bp->bnapi[i]->rx_ring)
			cancel_work_sync(&cpr->dim.work);

		napi_disable(&bp->bnapi[i]->napi);
	}
}

static void bnxt_enable_napi(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
		bp->bnapi[i]->in_reset = false;

		if (bp->bnapi[i]->rx_ring) {
			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
			cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}
		napi_enable(&bp->bnapi[i]->napi);
	}
}

void bnxt_tx_disable(struct bnxt *bp)
{
	int i;
	struct bnxt_tx_ring_info *txr;

	if (bp->tx_ring) {
		for (i = 0; i < bp->tx_nr_rings; i++) {
			txr = &bp->tx_ring[i];
			txr->dev_state = BNXT_DEV_STATE_CLOSING;
		}
	}
	/* Stop all TX queues */
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

void bnxt_tx_enable(struct bnxt *bp)
{
	int i;
	struct bnxt_tx_ring_info *txr;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		txr = &bp->tx_ring[i];
		txr->dev_state = 0;
	}
	netif_tx_wake_all_queues(bp->dev);
	if (bp->link_info.link_up)
		netif_carrier_on(bp->dev);
}
static void bnxt_report_link(struct bnxt *bp)
{
	if (bp->link_info.link_up) {
		const char *duplex;
		const char *flow_ctrl;
		u32 speed;
		u16 fec;

		netif_carrier_on(bp->dev);
		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
			duplex = "full";
		else
			duplex = "half";
		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
			flow_ctrl = "ON - receive & transmit";
		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
			flow_ctrl = "ON - transmit";
		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
			flow_ctrl = "ON - receive";
		else
			flow_ctrl = "none";
		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
			    speed, duplex, flow_ctrl);
		if (bp->flags & BNXT_FLAG_EEE_CAP)
			netdev_info(bp->dev, "EEE is %s\n",
				    bp->eee.eee_active ? "active" :
							 "not active");
		fec = bp->link_info.fec_cfg;
		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
			netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
				    (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
				    (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}

static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_port_phy_qcaps_input req = {0};
	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_link_info *link_info = &bp->link_info;

	if (bp->hwrm_spec_code < 0x10201)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_phy_qcaps_exit;

	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
		struct ethtool_eee *eee = &bp->eee;
		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);

		bp->flags |= BNXT_FLAG_EEE_CAP;
		eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
	}
	if (resp->supported_speeds_auto_mode)
		link_info->support_auto_speeds =
			le16_to_cpu(resp->supported_speeds_auto_mode);

	bp->port_count = resp->port_cnt;

hwrm_phy_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
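/* Refresh the cached link state from HWRM_PORT_PHY_QCFG and, when
 * chng_link_state is set, report carrier changes.  Advertised speeds
 * that the PHY no longer supports are also dropped here, under RTNL.
 */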
static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 link_up = link_info->link_up;
	u16 diff;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}

	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
	link_info->phy_link_status = resp->link;
	link_info->duplex = resp->duplex_cfg;
	if (bp->hwrm_spec_code >= 0x10800)
		link_info->duplex = resp->duplex_state;
	link_info->pause = resp->pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->auto_pause_setting = resp->auto_pause;
	link_info->lp_pause = resp->link_partner_adv_pause;
	link_info->force_pause_setting = resp->force_pause;
	link_info->duplex_setting = resp->duplex_cfg;
	if (link_info->phy_link_status == BNXT_LINK_LINK)
		link_info->link_speed = le16_to_cpu(resp->link_speed);
	else
		link_info->link_speed = 0;
	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
	link_info->lp_auto_link_speeds =
		le16_to_cpu(resp->link_partner_adv_speeds);
	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;
	link_info->media_type = resp->media_type;
	link_info->phy_type = resp->phy_type;
	link_info->transceiver = resp->xcvr_pkg_type;
	link_info->phy_addr = resp->eee_config_phy_addr &
			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
	link_info->module_status = resp->module_status;

	if (bp->flags & BNXT_FLAG_EEE_CAP) {
		struct ethtool_eee *eee = &bp->eee;
		u16 fw_speeds;

		eee->eee_active = 0;
		if (resp->eee_config_phy_addr &
		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
			eee->eee_active = 1;
			fw_speeds = le16_to_cpu(
				resp->link_partner_adv_eee_link_speed_mask);
			eee->lp_advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
		}

		/* Pull initial EEE config */
		if (!chng_link_state) {
			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
				eee->eee_enabled = 1;

			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
			eee->advertised =
				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);

			if (resp->eee_config_phy_addr &
			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
				__le32 tmr;

				eee->tx_lpi_enabled = 1;
				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
				eee->tx_lpi_timer = le32_to_cpu(tmr) &
					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
			}
		}
	}

	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
	if (bp->hwrm_spec_code >= 0x10504)
		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);

	/* TODO: need to add more logic to report VF link */
	if (chng_link_state) {
		if (link_info->phy_link_status == BNXT_LINK_LINK)
			link_info->link_up = 1;
		else
			link_info->link_up = 0;
		if (link_up != link_info->link_up)
			bnxt_report_link(bp);
	} else {
		/* always link down if not required to update link state */
		link_info->link_up = 0;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);

	diff = link_info->support_auto_speeds ^ link_info->advertising;
	if ((link_info->support_auto_speeds | diff) !=
	    link_info->support_auto_speeds) {
		/* An advertised speed is no longer supported, so we need to
		 * update the advertisement settings.  Caller holds RTNL
		 * so we can modify link settings.
		 */
		link_info->advertising = link_info->support_auto_speeds;
		if (link_info->autoneg & BNXT_AUTONEG_SPEED)
			bnxt_hwrm_set_link_setting(bp, true, false);
	}
	return 0;
}
static void bnxt_get_port_module_status(struct bnxt *bp)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
	u8 module_status;

	if (bnxt_update_link(bp, true))
		return;

	module_status = link_info->module_status;
	switch (module_status) {
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
			    bp->pf.port_id);
		if (bp->hwrm_spec_code >= 0x10201) {
			netdev_warn(bp->dev, "Module part number %s\n",
				    resp->phy_vendor_partnumber);
		}
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
			netdev_warn(bp->dev, "TX is disabled\n");
		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
	}
}

static void
bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
		if (bp->hwrm_spec_code >= 0x10201)
			req->auto_pause =
				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
		req->enables |=
			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
	} else {
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
		req->enables |=
			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
		if (bp->hwrm_spec_code >= 0x10201) {
			req->auto_pause = req->force_pause;
			req->enables |= cpu_to_le32(
				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
		}
	}
}

static void bnxt_hwrm_set_link_common(struct bnxt *bp,
				      struct hwrm_port_phy_cfg_input *req)
{
	u8 autoneg = bp->link_info.autoneg;
	u16 fw_link_speed = bp->link_info.req_link_speed;
	u16 advertising = bp->link_info.advertising;

	if (autoneg & BNXT_AUTONEG_SPEED) {
		req->auto_mode |=
			PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;

		req->enables |= cpu_to_le32(
			PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
		req->auto_link_speed_mask = cpu_to_le16(advertising);

		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
		req->flags |=
			cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
	} else {
		req->force_link_speed = cpu_to_le16(fw_link_speed);
		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
	}

	/* tell chimp that the setting takes effect immediately */
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
}

int bnxt_hwrm_set_pause(struct bnxt *bp)
{
	struct hwrm_port_phy_cfg_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
	bnxt_hwrm_set_pause_common(bp, &req);

	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
	    bp->link_info.force_link_chng)
		bnxt_hwrm_set_link_common(bp, &req);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
		/* since changing the pause setting doesn't trigger any link
		 * change event, the driver needs to update the current pause
		 * result upon successful return of the phy_cfg command
		 */
		bp->link_info.pause =
		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
		bp->link_info.auto_pause_setting = 0;
		if (!bp->link_info.force_link_chng)
			bnxt_report_link(bp);
	}
	bp->link_info.force_link_chng = false;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_hwrm_set_eee(struct bnxt *bp,
			      struct hwrm_port_phy_cfg_input *req)
{
	struct ethtool_eee *eee = &bp->eee;

	if (eee->eee_enabled) {
		u16 eee_speeds;
		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;

		if (eee->tx_lpi_enabled)
			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
		else
			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;

		req->flags |= cpu_to_le32(flags);
		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
	} else {
		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
	}
}

int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
{
	struct hwrm_port_phy_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
	if (set_pause)
		bnxt_hwrm_set_pause_common(bp, &req);

	bnxt_hwrm_set_link_common(bp, &req);

	if (set_eee)
		bnxt_hwrm_set_eee(bp, &req);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
{
	struct hwrm_port_phy_cfg_input req = {0};

	if (!BNXT_SINGLE_PF(bp))
		return 0;

	if (pci_num_vf(bp->pdev))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_qcaps_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return rc;
	}
	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
		int i;

		bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
						 bp->num_leds);
		for (i = 0; i < bp->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];
			__le16 caps = led->led_state_caps;

			if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->num_leds = 0;
				break;
			}
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
{
	struct hwrm_wol_filter_alloc_input req = {0};
	struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
	req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
	memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->wol_filter_id = resp->wol_filter_id;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
{
	struct hwrm_wol_filter_free_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
	req.wol_filter_id = bp->wol_filter_id;
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}

static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
{
	struct hwrm_wol_filter_qcfg_input req = {0};
	struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u16 next_handle = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	req.handle = cpu_to_le16(handle);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		next_handle = le16_to_cpu(resp->next_handle);
		if (next_handle != 0) {
			if (resp->wol_type ==
			    WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
				bp->wol = 1;
				bp->wol_filter_id = resp->wol_filter_id;
			}
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return next_handle;
}

static void bnxt_get_wol_settings(struct bnxt *bp)
{
	u16 handle = 0;

	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
		return;

	do {
		handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
	} while (handle && handle != 0xffff);
}
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return true;

	if (eee->eee_enabled) {
		u32 advertising =
			_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);

		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
			eee->eee_enabled = 0;
			return false;
		}
		if (eee->advertised & ~advertising) {
			eee->advertised = advertising & eee->supported;
			return false;
		}
	}
	return true;
}

static int bnxt_update_phy_setting(struct bnxt *bp)
{
	int rc;
	bool update_link = false;
	bool update_pause = false;
	bool update_eee = false;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_update_link(bp, true);
	if (rc) {
		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
			   rc);
		return rc;
	}
	if (!BNXT_SINGLE_PF(bp))
		return 0;

	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
	    link_info->req_flow_ctrl)
		update_pause = true;
	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
	    link_info->force_pause_setting != link_info->req_flow_ctrl)
		update_pause = true;
	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		if (BNXT_AUTO_MODE(link_info->auto_mode))
			update_link = true;
		if (link_info->req_link_speed != link_info->force_link_speed)
			update_link = true;
		if (link_info->req_duplex != link_info->duplex_setting)
			update_link = true;
	} else {
		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
			update_link = true;
		if (link_info->advertising != link_info->auto_link_speeds)
			update_link = true;
	}

	/* The last close may have shutdown the link, so we need to call
	 * PHY_CFG to bring it back up.
	 */
	if (!netif_carrier_ok(bp->dev))
		update_link = true;

	if (!bnxt_eee_config_ok(bp))
		update_eee = true;

	if (update_link)
		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
	else if (update_pause)
		rc = bnxt_hwrm_set_pause(bp);
	if (rc) {
		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
			   rc);
		return rc;
	}

	return rc;
}

/* Common routine to pre-map certain register blocks to different GRC windows.
 * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
 * in the PF and 3 windows in the VF can be customized to map in different
 * register blocks.
 */
static void bnxt_preset_reg_win(struct bnxt *bp)
{
	if (BNXT_PF(bp)) {
		/* CAG registers map to GRC window #4 */
		writel(BNXT_CAG_REG_BASE,
		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
	}
}
6699 static int __bnxt_open_nic(struct bnxt
*bp
, bool irq_re_init
, bool link_re_init
)
6703 bnxt_preset_reg_win(bp
);
6704 netif_carrier_off(bp
->dev
);
6706 rc
= bnxt_reserve_rings(bp
);
6710 rc
= bnxt_setup_int_mode(bp
);
6712 netdev_err(bp
->dev
, "bnxt_setup_int_mode err: %x\n",
6717 if ((bp
->flags
& BNXT_FLAG_RFS
) &&
6718 !(bp
->flags
& BNXT_FLAG_USING_MSIX
)) {
6719 /* disable RFS if falling back to INTA */
6720 bp
->dev
->hw_features
&= ~NETIF_F_NTUPLE
;
6721 bp
->flags
&= ~BNXT_FLAG_RFS
;
6724 rc
= bnxt_alloc_mem(bp
, irq_re_init
);
6726 netdev_err(bp
->dev
, "bnxt_alloc_mem err: %x\n", rc
);
6727 goto open_err_free_mem
;
6732 rc
= bnxt_request_irq(bp
);
6734 netdev_err(bp
->dev
, "bnxt_request_irq err: %x\n", rc
);
6739 bnxt_enable_napi(bp
);
6741 rc
= bnxt_init_nic(bp
, irq_re_init
);
6743 netdev_err(bp
->dev
, "bnxt_init_nic err: %x\n", rc
);
6748 mutex_lock(&bp
->link_lock
);
6749 rc
= bnxt_update_phy_setting(bp
);
6750 mutex_unlock(&bp
->link_lock
);
6752 netdev_warn(bp
->dev
, "failed to update phy settings\n");
6756 udp_tunnel_get_rx_info(bp
->dev
);
6758 set_bit(BNXT_STATE_OPEN
, &bp
->state
);
6759 bnxt_enable_int(bp
);
6760 /* Enable TX queues */
6762 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
6763 /* Poll link status and check for SFP+ module status */
6764 bnxt_get_port_module_status(bp
);
6766 /* VF-reps may need to be re-opened after the PF is re-opened */
6768 bnxt_vf_reps_open(bp
);
6772 bnxt_disable_napi(bp
);
6778 bnxt_free_mem(bp
, true);
6782 /* rtnl_lock held */
6783 int bnxt_open_nic(struct bnxt
*bp
, bool irq_re_init
, bool link_re_init
)
6787 rc
= __bnxt_open_nic(bp
, irq_re_init
, link_re_init
);
6789 netdev_err(bp
->dev
, "nic open fail (rc: %x)\n", rc
);
6795 /* rtnl_lock held, open the NIC half way by allocating all resources, but
6796 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
6799 int bnxt_half_open_nic(struct bnxt
*bp
)
6803 rc
= bnxt_alloc_mem(bp
, false);
6805 netdev_err(bp
->dev
, "bnxt_alloc_mem err: %x\n", rc
);
6808 rc
= bnxt_init_nic(bp
, false);
6810 netdev_err(bp
->dev
, "bnxt_init_nic err: %x\n", rc
);
6817 bnxt_free_mem(bp
, false);
6822 /* rtnl_lock held, this call can only be made after a previous successful
6823 * call to bnxt_half_open_nic().
6825 void bnxt_half_close_nic(struct bnxt
*bp
)
6827 bnxt_hwrm_resource_free(bp
, false, false);
6829 bnxt_free_mem(bp
, false);
6832 static int bnxt_open(struct net_device
*dev
)
6834 struct bnxt
*bp
= netdev_priv(dev
);
6836 return __bnxt_open_nic(bp
, true, true);
6839 static bool bnxt_drv_busy(struct bnxt
*bp
)
6841 return (test_bit(BNXT_STATE_IN_SP_TASK
, &bp
->state
) ||
6842 test_bit(BNXT_STATE_READ_STATS
, &bp
->state
));
6845 static void __bnxt_close_nic(struct bnxt
*bp
, bool irq_re_init
,
6848 /* Close the VF-reps before closing PF */
6850 bnxt_vf_reps_close(bp
);
6852 /* Change device state to avoid TX queue wake up's */
6853 bnxt_tx_disable(bp
);
6855 clear_bit(BNXT_STATE_OPEN
, &bp
->state
);
6856 smp_mb__after_atomic();
6857 while (bnxt_drv_busy(bp
))
6860 /* Flush rings and and disable interrupts */
6861 bnxt_shutdown_nic(bp
, irq_re_init
);
6863 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
6865 bnxt_disable_napi(bp
);
6866 del_timer_sync(&bp
->timer
);
6873 bnxt_free_mem(bp
, irq_re_init
);
6876 int bnxt_close_nic(struct bnxt
*bp
, bool irq_re_init
, bool link_re_init
)
6880 #ifdef CONFIG_BNXT_SRIOV
6881 if (bp
->sriov_cfg
) {
6882 rc
= wait_event_interruptible_timeout(bp
->sriov_cfg_wait
,
6884 BNXT_SRIOV_CFG_WAIT_TMO
);
6886 netdev_warn(bp
->dev
, "timeout waiting for SRIOV config operation to complete!\n");
6889 __bnxt_close_nic(bp
, irq_re_init
, link_re_init
);
6893 static int bnxt_close(struct net_device
*dev
)
6895 struct bnxt
*bp
= netdev_priv(dev
);
6897 bnxt_close_nic(bp
, true, true);
6898 bnxt_hwrm_shutdown_link(bp
);
6902 /* rtnl_lock held */
6903 static int bnxt_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
6909 if (!netif_running(dev
))
6916 if (!netif_running(dev
))
6929 bnxt_get_stats64(struct net_device
*dev
, struct rtnl_link_stats64
*stats
)
6932 struct bnxt
*bp
= netdev_priv(dev
);
6934 set_bit(BNXT_STATE_READ_STATS
, &bp
->state
);
6935 /* Make sure bnxt_close_nic() sees that we are reading stats before
6936 * we check the BNXT_STATE_OPEN flag.
6938 smp_mb__after_atomic();
6939 if (!test_bit(BNXT_STATE_OPEN
, &bp
->state
)) {
6940 clear_bit(BNXT_STATE_READ_STATS
, &bp
->state
);
6944 /* TODO check if we need to synchronize with bnxt_close path */
6945 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
6946 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
6947 struct bnxt_cp_ring_info
*cpr
= &bnapi
->cp_ring
;
6948 struct ctx_hw_stats
*hw_stats
= cpr
->hw_stats
;
6950 stats
->rx_packets
+= le64_to_cpu(hw_stats
->rx_ucast_pkts
);
6951 stats
->rx_packets
+= le64_to_cpu(hw_stats
->rx_mcast_pkts
);
6952 stats
->rx_packets
+= le64_to_cpu(hw_stats
->rx_bcast_pkts
);
6954 stats
->tx_packets
+= le64_to_cpu(hw_stats
->tx_ucast_pkts
);
6955 stats
->tx_packets
+= le64_to_cpu(hw_stats
->tx_mcast_pkts
);
6956 stats
->tx_packets
+= le64_to_cpu(hw_stats
->tx_bcast_pkts
);
6958 stats
->rx_bytes
+= le64_to_cpu(hw_stats
->rx_ucast_bytes
);
6959 stats
->rx_bytes
+= le64_to_cpu(hw_stats
->rx_mcast_bytes
);
6960 stats
->rx_bytes
+= le64_to_cpu(hw_stats
->rx_bcast_bytes
);
6962 stats
->tx_bytes
+= le64_to_cpu(hw_stats
->tx_ucast_bytes
);
6963 stats
->tx_bytes
+= le64_to_cpu(hw_stats
->tx_mcast_bytes
);
6964 stats
->tx_bytes
+= le64_to_cpu(hw_stats
->tx_bcast_bytes
);
6966 stats
->rx_missed_errors
+=
6967 le64_to_cpu(hw_stats
->rx_discard_pkts
);
6969 stats
->multicast
+= le64_to_cpu(hw_stats
->rx_mcast_pkts
);
6971 stats
->tx_dropped
+= le64_to_cpu(hw_stats
->tx_drop_pkts
);
6974 if (bp
->flags
& BNXT_FLAG_PORT_STATS
) {
6975 struct rx_port_stats
*rx
= bp
->hw_rx_port_stats
;
6976 struct tx_port_stats
*tx
= bp
->hw_tx_port_stats
;
6978 stats
->rx_crc_errors
= le64_to_cpu(rx
->rx_fcs_err_frames
);
6979 stats
->rx_frame_errors
= le64_to_cpu(rx
->rx_align_err_frames
);
6980 stats
->rx_length_errors
= le64_to_cpu(rx
->rx_undrsz_frames
) +
6981 le64_to_cpu(rx
->rx_ovrsz_frames
) +
6982 le64_to_cpu(rx
->rx_runt_frames
);
6983 stats
->rx_errors
= le64_to_cpu(rx
->rx_false_carrier_frames
) +
6984 le64_to_cpu(rx
->rx_jbr_frames
);
6985 stats
->collisions
= le64_to_cpu(tx
->tx_total_collisions
);
6986 stats
->tx_fifo_errors
= le64_to_cpu(tx
->tx_fifo_underruns
);
6987 stats
->tx_errors
= le64_to_cpu(tx
->tx_err
);
6989 clear_bit(BNXT_STATE_READ_STATS
, &bp
->state
);
6992 static bool bnxt_mc_list_updated(struct bnxt
*bp
, u32
*rx_mask
)
6994 struct net_device
*dev
= bp
->dev
;
6995 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
6996 struct netdev_hw_addr
*ha
;
6999 bool update
= false;
7002 netdev_for_each_mc_addr(ha
, dev
) {
7003 if (mc_count
>= BNXT_MAX_MC_ADDRS
) {
7004 *rx_mask
|= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST
;
7005 vnic
->mc_list_count
= 0;
7009 if (!ether_addr_equal(haddr
, vnic
->mc_list
+ off
)) {
7010 memcpy(vnic
->mc_list
+ off
, haddr
, ETH_ALEN
);
7017 *rx_mask
|= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST
;
7019 if (mc_count
!= vnic
->mc_list_count
) {
7020 vnic
->mc_list_count
= mc_count
;
7026 static bool bnxt_uc_list_updated(struct bnxt
*bp
)
7028 struct net_device
*dev
= bp
->dev
;
7029 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
7030 struct netdev_hw_addr
*ha
;
7033 if (netdev_uc_count(dev
) != (vnic
->uc_filter_count
- 1))
7036 netdev_for_each_uc_addr(ha
, dev
) {
7037 if (!ether_addr_equal(ha
->addr
, vnic
->uc_list
+ off
))
7045 static void bnxt_set_rx_mode(struct net_device
*dev
)
7047 struct bnxt
*bp
= netdev_priv(dev
);
7048 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
7049 u32 mask
= vnic
->rx_mask
;
7050 bool mc_update
= false;
7053 if (!netif_running(dev
))
7056 mask
&= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS
|
7057 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST
|
7058 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST
);
7060 if ((dev
->flags
& IFF_PROMISC
) && bnxt_promisc_ok(bp
))
7061 mask
|= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS
;
7063 uc_update
= bnxt_uc_list_updated(bp
);
7065 if (dev
->flags
& IFF_ALLMULTI
) {
7066 mask
|= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST
;
7067 vnic
->mc_list_count
= 0;
7069 mc_update
= bnxt_mc_list_updated(bp
, &mask
);
7072 if (mask
!= vnic
->rx_mask
|| uc_update
|| mc_update
) {
7073 vnic
->rx_mask
= mask
;
7075 set_bit(BNXT_RX_MASK_SP_EVENT
, &bp
->sp_event
);
7076 bnxt_queue_sp_work(bp
);
7080 static int bnxt_cfg_rx_mode(struct bnxt
*bp
)
7082 struct net_device
*dev
= bp
->dev
;
7083 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
7084 struct netdev_hw_addr
*ha
;
7088 netif_addr_lock_bh(dev
);
7089 uc_update
= bnxt_uc_list_updated(bp
);
7090 netif_addr_unlock_bh(dev
);
7095 mutex_lock(&bp
->hwrm_cmd_lock
);
7096 for (i
= 1; i
< vnic
->uc_filter_count
; i
++) {
7097 struct hwrm_cfa_l2_filter_free_input req
= {0};
7099 bnxt_hwrm_cmd_hdr_init(bp
, &req
, HWRM_CFA_L2_FILTER_FREE
, -1,
7102 req
.l2_filter_id
= vnic
->fw_l2_filter_id
[i
];
7104 rc
= _hwrm_send_message(bp
, &req
, sizeof(req
),
7107 mutex_unlock(&bp
->hwrm_cmd_lock
);
7109 vnic
->uc_filter_count
= 1;
7111 netif_addr_lock_bh(dev
);
7112 if (netdev_uc_count(dev
) > (BNXT_MAX_UC_ADDRS
- 1)) {
7113 vnic
->rx_mask
|= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS
;
7115 netdev_for_each_uc_addr(ha
, dev
) {
7116 memcpy(vnic
->uc_list
+ off
, ha
->addr
, ETH_ALEN
);
7118 vnic
->uc_filter_count
++;
7121 netif_addr_unlock_bh(dev
);
7123 for (i
= 1, off
= 0; i
< vnic
->uc_filter_count
; i
++, off
+= ETH_ALEN
) {
7124 rc
= bnxt_hwrm_set_vnic_filter(bp
, 0, i
, vnic
->uc_list
+ off
);
7126 netdev_err(bp
->dev
, "HWRM vnic filter failure rc: %x\n",
7128 vnic
->uc_filter_count
= i
;
7134 rc
= bnxt_hwrm_cfa_l2_set_rx_mask(bp
, 0);
7136 netdev_err(bp
->dev
, "HWRM cfa l2 rx mask failure rc: %x\n",
7142 /* If the chip and firmware supports RFS */
7143 static bool bnxt_rfs_supported(struct bnxt
*bp
)
7145 if (BNXT_PF(bp
) && !BNXT_CHIP_TYPE_NITRO_A0(bp
))
7147 if (bp
->flags
& BNXT_FLAG_NEW_RSS_CAP
)
7152 /* If runtime conditions support RFS */
7153 static bool bnxt_rfs_capable(struct bnxt
*bp
)
7155 #ifdef CONFIG_RFS_ACCEL
7156 int vnics
, max_vnics
, max_rss_ctxs
;
7158 if (!(bp
->flags
& BNXT_FLAG_MSIX_CAP
))
7161 vnics
= 1 + bp
->rx_nr_rings
;
7162 max_vnics
= bnxt_get_max_func_vnics(bp
);
7163 max_rss_ctxs
= bnxt_get_max_func_rss_ctxs(bp
);
7165 /* RSS contexts not a limiting factor */
7166 if (bp
->flags
& BNXT_FLAG_NEW_RSS_CAP
)
7167 max_rss_ctxs
= max_vnics
;
7168 if (vnics
> max_vnics
|| vnics
> max_rss_ctxs
) {
7169 if (bp
->rx_nr_rings
> 1)
7170 netdev_warn(bp
->dev
,
7171 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
7172 min(max_rss_ctxs
- 1, max_vnics
- 1));
7176 if (!(bp
->flags
& BNXT_FLAG_NEW_RM
))
7179 if (vnics
== bp
->hw_resc
.resv_vnics
)
7182 bnxt_hwrm_reserve_rings(bp
, 0, 0, 0, 0, vnics
);
7183 if (vnics
<= bp
->hw_resc
.resv_vnics
)
7186 netdev_warn(bp
->dev
, "Unable to reserve resources to support NTUPLE filters.\n");
7187 bnxt_hwrm_reserve_rings(bp
, 0, 0, 0, 0, 1);
7194 static netdev_features_t
bnxt_fix_features(struct net_device
*dev
,
7195 netdev_features_t features
)
7197 struct bnxt
*bp
= netdev_priv(dev
);
7199 if ((features
& NETIF_F_NTUPLE
) && !bnxt_rfs_capable(bp
))
7200 features
&= ~NETIF_F_NTUPLE
;
7202 if (bp
->flags
& BNXT_FLAG_NO_AGG_RINGS
)
7203 features
&= ~(NETIF_F_LRO
| NETIF_F_GRO_HW
);
7205 if (!(features
& NETIF_F_GRO
))
7206 features
&= ~NETIF_F_GRO_HW
;
7208 if (features
& NETIF_F_GRO_HW
)
7209 features
&= ~NETIF_F_LRO
;
7211 /* Both CTAG and STAG VLAN accelaration on the RX side have to be
7212 * turned on or off together.
7214 if ((features
& (NETIF_F_HW_VLAN_CTAG_RX
| NETIF_F_HW_VLAN_STAG_RX
)) !=
7215 (NETIF_F_HW_VLAN_CTAG_RX
| NETIF_F_HW_VLAN_STAG_RX
)) {
7216 if (dev
->features
& NETIF_F_HW_VLAN_CTAG_RX
)
7217 features
&= ~(NETIF_F_HW_VLAN_CTAG_RX
|
7218 NETIF_F_HW_VLAN_STAG_RX
);
7220 features
|= NETIF_F_HW_VLAN_CTAG_RX
|
7221 NETIF_F_HW_VLAN_STAG_RX
;
7223 #ifdef CONFIG_BNXT_SRIOV
7226 features
&= ~(NETIF_F_HW_VLAN_CTAG_RX
|
7227 NETIF_F_HW_VLAN_STAG_RX
);
7234 static int bnxt_set_features(struct net_device
*dev
, netdev_features_t features
)
7236 struct bnxt
*bp
= netdev_priv(dev
);
7237 u32 flags
= bp
->flags
;
7240 bool re_init
= false;
7241 bool update_tpa
= false;
7243 flags
&= ~BNXT_FLAG_ALL_CONFIG_FEATS
;
7244 if (features
& NETIF_F_GRO_HW
)
7245 flags
|= BNXT_FLAG_GRO
;
7246 else if (features
& NETIF_F_LRO
)
7247 flags
|= BNXT_FLAG_LRO
;
7249 if (bp
->flags
& BNXT_FLAG_NO_AGG_RINGS
)
7250 flags
&= ~BNXT_FLAG_TPA
;
7252 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
7253 flags
|= BNXT_FLAG_STRIP_VLAN
;
7255 if (features
& NETIF_F_NTUPLE
)
7256 flags
|= BNXT_FLAG_RFS
;
7258 changes
= flags
^ bp
->flags
;
7259 if (changes
& BNXT_FLAG_TPA
) {
7261 if ((bp
->flags
& BNXT_FLAG_TPA
) == 0 ||
7262 (flags
& BNXT_FLAG_TPA
) == 0)
7266 if (changes
& ~BNXT_FLAG_TPA
)
7269 if (flags
!= bp
->flags
) {
7270 u32 old_flags
= bp
->flags
;
7274 if (!test_bit(BNXT_STATE_OPEN
, &bp
->state
)) {
7276 bnxt_set_ring_params(bp
);
7281 bnxt_close_nic(bp
, false, false);
7283 bnxt_set_ring_params(bp
);
7285 return bnxt_open_nic(bp
, false, false);
7288 rc
= bnxt_set_tpa(bp
,
7289 (flags
& BNXT_FLAG_TPA
) ?
7292 bp
->flags
= old_flags
;
7298 static void bnxt_dump_tx_sw_state(struct bnxt_napi
*bnapi
)
7300 struct bnxt_tx_ring_info
*txr
= bnapi
->tx_ring
;
7301 int i
= bnapi
->index
;
7306 netdev_info(bnapi
->bp
->dev
, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
7307 i
, txr
->tx_ring_struct
.fw_ring_id
, txr
->tx_prod
,
7311 static void bnxt_dump_rx_sw_state(struct bnxt_napi
*bnapi
)
7313 struct bnxt_rx_ring_info
*rxr
= bnapi
->rx_ring
;
7314 int i
= bnapi
->index
;
7319 netdev_info(bnapi
->bp
->dev
, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
7320 i
, rxr
->rx_ring_struct
.fw_ring_id
, rxr
->rx_prod
,
7321 rxr
->rx_agg_ring_struct
.fw_ring_id
, rxr
->rx_agg_prod
,
7322 rxr
->rx_sw_agg_prod
);
7325 static void bnxt_dump_cp_sw_state(struct bnxt_napi
*bnapi
)
7327 struct bnxt_cp_ring_info
*cpr
= &bnapi
->cp_ring
;
7328 int i
= bnapi
->index
;
7330 netdev_info(bnapi
->bp
->dev
, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
7331 i
, cpr
->cp_ring_struct
.fw_ring_id
, cpr
->cp_raw_cons
);
7334 static void bnxt_dbg_dump_states(struct bnxt
*bp
)
7337 struct bnxt_napi
*bnapi
;
7339 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
7340 bnapi
= bp
->bnapi
[i
];
7341 if (netif_msg_drv(bp
)) {
7342 bnxt_dump_tx_sw_state(bnapi
);
7343 bnxt_dump_rx_sw_state(bnapi
);
7344 bnxt_dump_cp_sw_state(bnapi
);
7349 static void bnxt_reset_task(struct bnxt
*bp
, bool silent
)
7352 bnxt_dbg_dump_states(bp
);
7353 if (netif_running(bp
->dev
)) {
7358 bnxt_close_nic(bp
, false, false);
7359 rc
= bnxt_open_nic(bp
, false, false);
7365 static void bnxt_tx_timeout(struct net_device
*dev
)
7367 struct bnxt
*bp
= netdev_priv(dev
);
7369 netdev_err(bp
->dev
, "TX timeout detected, starting reset task!\n");
7370 set_bit(BNXT_RESET_TASK_SP_EVENT
, &bp
->sp_event
);
7371 bnxt_queue_sp_work(bp
);
7374 #ifdef CONFIG_NET_POLL_CONTROLLER
7375 static void bnxt_poll_controller(struct net_device
*dev
)
7377 struct bnxt
*bp
= netdev_priv(dev
);
7380 /* Only process tx rings/combined rings in netpoll mode. */
7381 for (i
= 0; i
< bp
->tx_nr_rings
; i
++) {
7382 struct bnxt_tx_ring_info
*txr
= &bp
->tx_ring
[i
];
7384 napi_schedule(&txr
->bnapi
->napi
);
7389 static void bnxt_timer(struct timer_list
*t
)
7391 struct bnxt
*bp
= from_timer(bp
, t
, timer
);
7392 struct net_device
*dev
= bp
->dev
;
7394 if (!netif_running(dev
))
7397 if (atomic_read(&bp
->intr_sem
) != 0)
7398 goto bnxt_restart_timer
;
7400 if (bp
->link_info
.link_up
&& (bp
->flags
& BNXT_FLAG_PORT_STATS
) &&
7401 bp
->stats_coal_ticks
) {
7402 set_bit(BNXT_PERIODIC_STATS_SP_EVENT
, &bp
->sp_event
);
7403 bnxt_queue_sp_work(bp
);
7406 if (bnxt_tc_flower_enabled(bp
)) {
7407 set_bit(BNXT_FLOW_STATS_SP_EVENT
, &bp
->sp_event
);
7408 bnxt_queue_sp_work(bp
);
7411 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
7414 static void bnxt_rtnl_lock_sp(struct bnxt
*bp
)
7416 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
7417 * set. If the device is being closed, bnxt_close() may be holding
7418 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
7419 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
7421 clear_bit(BNXT_STATE_IN_SP_TASK
, &bp
->state
);
7425 static void bnxt_rtnl_unlock_sp(struct bnxt
*bp
)
7427 set_bit(BNXT_STATE_IN_SP_TASK
, &bp
->state
);
7431 /* Only called from bnxt_sp_task() */
7432 static void bnxt_reset(struct bnxt
*bp
, bool silent
)
7434 bnxt_rtnl_lock_sp(bp
);
7435 if (test_bit(BNXT_STATE_OPEN
, &bp
->state
))
7436 bnxt_reset_task(bp
, silent
);
7437 bnxt_rtnl_unlock_sp(bp
);
7440 static void bnxt_cfg_ntp_filters(struct bnxt
*);
7442 static void bnxt_sp_task(struct work_struct
*work
)
7444 struct bnxt
*bp
= container_of(work
, struct bnxt
, sp_task
);
7446 set_bit(BNXT_STATE_IN_SP_TASK
, &bp
->state
);
7447 smp_mb__after_atomic();
7448 if (!test_bit(BNXT_STATE_OPEN
, &bp
->state
)) {
7449 clear_bit(BNXT_STATE_IN_SP_TASK
, &bp
->state
);
7453 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT
, &bp
->sp_event
))
7454 bnxt_cfg_rx_mode(bp
);
7456 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT
, &bp
->sp_event
))
7457 bnxt_cfg_ntp_filters(bp
);
7458 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT
, &bp
->sp_event
))
7459 bnxt_hwrm_exec_fwd_req(bp
);
7460 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT
, &bp
->sp_event
)) {
7461 bnxt_hwrm_tunnel_dst_port_alloc(
7463 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN
);
7465 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT
, &bp
->sp_event
)) {
7466 bnxt_hwrm_tunnel_dst_port_free(
7467 bp
, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN
);
7469 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT
, &bp
->sp_event
)) {
7470 bnxt_hwrm_tunnel_dst_port_alloc(
7472 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE
);
7474 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT
, &bp
->sp_event
)) {
7475 bnxt_hwrm_tunnel_dst_port_free(
7476 bp
, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE
);
7478 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT
, &bp
->sp_event
))
7479 bnxt_hwrm_port_qstats(bp
);
7481 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT
, &bp
->sp_event
)) {
7484 mutex_lock(&bp
->link_lock
);
7485 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT
,
7487 bnxt_hwrm_phy_qcaps(bp
);
7489 rc
= bnxt_update_link(bp
, true);
7490 mutex_unlock(&bp
->link_lock
);
7492 netdev_err(bp
->dev
, "SP task can't update link (rc: %x)\n",
7495 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT
, &bp
->sp_event
)) {
7496 mutex_lock(&bp
->link_lock
);
7497 bnxt_get_port_module_status(bp
);
7498 mutex_unlock(&bp
->link_lock
);
7501 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT
, &bp
->sp_event
))
7502 bnxt_tc_flow_stats_work(bp
);
7504 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
7505 * must be the last functions to be called before exiting.
7507 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT
, &bp
->sp_event
))
7508 bnxt_reset(bp
, false);
7510 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT
, &bp
->sp_event
))
7511 bnxt_reset(bp
, true);
7513 smp_mb__before_atomic();
7514 clear_bit(BNXT_STATE_IN_SP_TASK
, &bp
->state
);
7517 /* Under rtnl_lock */
7518 int bnxt_check_rings(struct bnxt
*bp
, int tx
, int rx
, bool sh
, int tcs
,
7521 int max_rx
, max_tx
, tx_sets
= 1;
7522 int tx_rings_needed
;
7529 rc
= bnxt_get_max_rings(bp
, &max_rx
, &max_tx
, sh
);
7536 tx_rings_needed
= tx
* tx_sets
+ tx_xdp
;
7537 if (max_tx
< tx_rings_needed
)
7541 if (bp
->flags
& BNXT_FLAG_RFS
)
7544 if (bp
->flags
& BNXT_FLAG_AGG_RINGS
)
7546 cp
= sh
? max_t(int, tx_rings_needed
, rx
) : tx_rings_needed
+ rx
;
7547 return bnxt_hwrm_check_rings(bp
, tx_rings_needed
, rx_rings
, rx
, cp
,
7551 static void bnxt_unmap_bars(struct bnxt
*bp
, struct pci_dev
*pdev
)
7554 pci_iounmap(pdev
, bp
->bar2
);
7559 pci_iounmap(pdev
, bp
->bar1
);
7564 pci_iounmap(pdev
, bp
->bar0
);
7569 static void bnxt_cleanup_pci(struct bnxt
*bp
)
7571 bnxt_unmap_bars(bp
, bp
->pdev
);
7572 pci_release_regions(bp
->pdev
);
7573 pci_disable_device(bp
->pdev
);
7576 static void bnxt_init_dflt_coal(struct bnxt
*bp
)
7578 struct bnxt_coal
*coal
;
7580 /* Tick values in micro seconds.
7581 * 1 coal_buf x bufs_per_record = 1 completion record.
7583 coal
= &bp
->rx_coal
;
7584 coal
->coal_ticks
= 14;
7585 coal
->coal_bufs
= 30;
7586 coal
->coal_ticks_irq
= 1;
7587 coal
->coal_bufs_irq
= 2;
7588 coal
->idle_thresh
= 25;
7589 coal
->bufs_per_record
= 2;
7590 coal
->budget
= 64; /* NAPI budget */
7592 coal
= &bp
->tx_coal
;
7593 coal
->coal_ticks
= 28;
7594 coal
->coal_bufs
= 30;
7595 coal
->coal_ticks_irq
= 2;
7596 coal
->coal_bufs_irq
= 2;
7597 coal
->bufs_per_record
= 1;
7599 bp
->stats_coal_ticks
= BNXT_DEF_STATS_COAL_TICKS
;
7602 static int bnxt_init_board(struct pci_dev
*pdev
, struct net_device
*dev
)
7605 struct bnxt
*bp
= netdev_priv(dev
);
7607 SET_NETDEV_DEV(dev
, &pdev
->dev
);
7609 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7610 rc
= pci_enable_device(pdev
);
7612 dev_err(&pdev
->dev
, "Cannot enable PCI device, aborting\n");
7616 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
7618 "Cannot find PCI device base address, aborting\n");
7620 goto init_err_disable
;
7623 rc
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
7625 dev_err(&pdev
->dev
, "Cannot obtain PCI resources, aborting\n");
7626 goto init_err_disable
;
7629 if (dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64)) != 0 &&
7630 dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32)) != 0) {
7631 dev_err(&pdev
->dev
, "System does not support DMA, aborting\n");
7632 goto init_err_disable
;
7635 pci_set_master(pdev
);
7640 bp
->bar0
= pci_ioremap_bar(pdev
, 0);
7642 dev_err(&pdev
->dev
, "Cannot map device registers, aborting\n");
7644 goto init_err_release
;
7647 bp
->bar1
= pci_ioremap_bar(pdev
, 2);
7649 dev_err(&pdev
->dev
, "Cannot map doorbell registers, aborting\n");
7651 goto init_err_release
;
7654 bp
->bar2
= pci_ioremap_bar(pdev
, 4);
7656 dev_err(&pdev
->dev
, "Cannot map bar4 registers, aborting\n");
7658 goto init_err_release
;
7661 pci_enable_pcie_error_reporting(pdev
);
7663 INIT_WORK(&bp
->sp_task
, bnxt_sp_task
);
7665 spin_lock_init(&bp
->ntp_fltr_lock
);
7667 bp
->rx_ring_size
= BNXT_DEFAULT_RX_RING_SIZE
;
7668 bp
->tx_ring_size
= BNXT_DEFAULT_TX_RING_SIZE
;
7670 bnxt_init_dflt_coal(bp
);
7672 timer_setup(&bp
->timer
, bnxt_timer
, 0);
7673 bp
->current_interval
= BNXT_TIMER_INTERVAL
;
7675 clear_bit(BNXT_STATE_OPEN
, &bp
->state
);
7679 bnxt_unmap_bars(bp
, pdev
);
7680 pci_release_regions(pdev
);
7683 pci_disable_device(pdev
);
7689 /* rtnl_lock held */
7690 static int bnxt_change_mac_addr(struct net_device
*dev
, void *p
)
7692 struct sockaddr
*addr
= p
;
7693 struct bnxt
*bp
= netdev_priv(dev
);
7696 if (!is_valid_ether_addr(addr
->sa_data
))
7697 return -EADDRNOTAVAIL
;
7699 if (ether_addr_equal(addr
->sa_data
, dev
->dev_addr
))
7702 rc
= bnxt_approve_mac(bp
, addr
->sa_data
);
7706 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
7707 if (netif_running(dev
)) {
7708 bnxt_close_nic(bp
, false, false);
7709 rc
= bnxt_open_nic(bp
, false, false);
7715 /* rtnl_lock held */
7716 static int bnxt_change_mtu(struct net_device
*dev
, int new_mtu
)
7718 struct bnxt
*bp
= netdev_priv(dev
);
7720 if (netif_running(dev
))
7721 bnxt_close_nic(bp
, false, false);
7724 bnxt_set_ring_params(bp
);
7726 if (netif_running(dev
))
7727 return bnxt_open_nic(bp
, false, false);
7732 int bnxt_setup_mq_tc(struct net_device
*dev
, u8 tc
)
7734 struct bnxt
*bp
= netdev_priv(dev
);
7738 if (tc
> bp
->max_tc
) {
7739 netdev_err(dev
, "Too many traffic classes requested: %d. Max supported is %d.\n",
7744 if (netdev_get_num_tc(dev
) == tc
)
7747 if (bp
->flags
& BNXT_FLAG_SHARED_RINGS
)
7750 rc
= bnxt_check_rings(bp
, bp
->tx_nr_rings_per_tc
, bp
->rx_nr_rings
,
7751 sh
, tc
, bp
->tx_nr_rings_xdp
);
7755 /* Needs to close the device and do hw resource re-allocations */
7756 if (netif_running(bp
->dev
))
7757 bnxt_close_nic(bp
, true, false);
7760 bp
->tx_nr_rings
= bp
->tx_nr_rings_per_tc
* tc
;
7761 netdev_set_num_tc(dev
, tc
);
7763 bp
->tx_nr_rings
= bp
->tx_nr_rings_per_tc
;
7764 netdev_reset_tc(dev
);
7766 bp
->tx_nr_rings
+= bp
->tx_nr_rings_xdp
;
7767 bp
->cp_nr_rings
= sh
? max_t(int, bp
->tx_nr_rings
, bp
->rx_nr_rings
) :
7768 bp
->tx_nr_rings
+ bp
->rx_nr_rings
;
7769 bp
->num_stat_ctxs
= bp
->cp_nr_rings
;
7771 if (netif_running(bp
->dev
))
7772 return bnxt_open_nic(bp
, true, false);
7777 static int bnxt_setup_tc_block_cb(enum tc_setup_type type
, void *type_data
,
7780 struct bnxt
*bp
= cb_priv
;
7782 if (!bnxt_tc_flower_enabled(bp
) ||
7783 !tc_cls_can_offload_and_chain0(bp
->dev
, type_data
))
7787 case TC_SETUP_CLSFLOWER
:
7788 return bnxt_tc_setup_flower(bp
, bp
->pf
.fw_fid
, type_data
);
7794 static int bnxt_setup_tc_block(struct net_device
*dev
,
7795 struct tc_block_offload
*f
)
7797 struct bnxt
*bp
= netdev_priv(dev
);
7799 if (f
->binder_type
!= TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS
)
7802 switch (f
->command
) {
7804 return tcf_block_cb_register(f
->block
, bnxt_setup_tc_block_cb
,
7806 case TC_BLOCK_UNBIND
:
7807 tcf_block_cb_unregister(f
->block
, bnxt_setup_tc_block_cb
, bp
);
7814 static int bnxt_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
7818 case TC_SETUP_BLOCK
:
7819 return bnxt_setup_tc_block(dev
, type_data
);
7820 case TC_SETUP_QDISC_MQPRIO
: {
7821 struct tc_mqprio_qopt
*mqprio
= type_data
;
7823 mqprio
->hw
= TC_MQPRIO_HW_OFFLOAD_TCS
;
7825 return bnxt_setup_mq_tc(dev
, mqprio
->num_tc
);
7832 #ifdef CONFIG_RFS_ACCEL
7833 static bool bnxt_fltr_match(struct bnxt_ntuple_filter
*f1
,
7834 struct bnxt_ntuple_filter
*f2
)
7836 struct flow_keys
*keys1
= &f1
->fkeys
;
7837 struct flow_keys
*keys2
= &f2
->fkeys
;
7839 if (keys1
->addrs
.v4addrs
.src
== keys2
->addrs
.v4addrs
.src
&&
7840 keys1
->addrs
.v4addrs
.dst
== keys2
->addrs
.v4addrs
.dst
&&
7841 keys1
->ports
.ports
== keys2
->ports
.ports
&&
7842 keys1
->basic
.ip_proto
== keys2
->basic
.ip_proto
&&
7843 keys1
->basic
.n_proto
== keys2
->basic
.n_proto
&&
7844 keys1
->control
.flags
== keys2
->control
.flags
&&
7845 ether_addr_equal(f1
->src_mac_addr
, f2
->src_mac_addr
) &&
7846 ether_addr_equal(f1
->dst_mac_addr
, f2
->dst_mac_addr
))
7852 static int bnxt_rx_flow_steer(struct net_device
*dev
, const struct sk_buff
*skb
,
7853 u16 rxq_index
, u32 flow_id
)
7855 struct bnxt
*bp
= netdev_priv(dev
);
7856 struct bnxt_ntuple_filter
*fltr
, *new_fltr
;
7857 struct flow_keys
*fkeys
;
7858 struct ethhdr
*eth
= (struct ethhdr
*)skb_mac_header(skb
);
7859 int rc
= 0, idx
, bit_id
, l2_idx
= 0;
7860 struct hlist_head
*head
;
7862 if (!ether_addr_equal(dev
->dev_addr
, eth
->h_dest
)) {
7863 struct bnxt_vnic_info
*vnic
= &bp
->vnic_info
[0];
7866 netif_addr_lock_bh(dev
);
7867 for (j
= 0; j
< vnic
->uc_filter_count
; j
++, off
+= ETH_ALEN
) {
7868 if (ether_addr_equal(eth
->h_dest
,
7869 vnic
->uc_list
+ off
)) {
7874 netif_addr_unlock_bh(dev
);
7878 new_fltr
= kzalloc(sizeof(*new_fltr
), GFP_ATOMIC
);
7882 fkeys
= &new_fltr
->fkeys
;
7883 if (!skb_flow_dissect_flow_keys(skb
, fkeys
, 0)) {
7884 rc
= -EPROTONOSUPPORT
;
7888 if ((fkeys
->basic
.n_proto
!= htons(ETH_P_IP
) &&
7889 fkeys
->basic
.n_proto
!= htons(ETH_P_IPV6
)) ||
7890 ((fkeys
->basic
.ip_proto
!= IPPROTO_TCP
) &&
7891 (fkeys
->basic
.ip_proto
!= IPPROTO_UDP
))) {
7892 rc
= -EPROTONOSUPPORT
;
7895 if (fkeys
->basic
.n_proto
== htons(ETH_P_IPV6
) &&
7896 bp
->hwrm_spec_code
< 0x10601) {
7897 rc
= -EPROTONOSUPPORT
;
7900 if ((fkeys
->control
.flags
& FLOW_DIS_ENCAPSULATION
) &&
7901 bp
->hwrm_spec_code
< 0x10601) {
7902 rc
= -EPROTONOSUPPORT
;
7906 memcpy(new_fltr
->dst_mac_addr
, eth
->h_dest
, ETH_ALEN
);
7907 memcpy(new_fltr
->src_mac_addr
, eth
->h_source
, ETH_ALEN
);
7909 idx
= skb_get_hash_raw(skb
) & BNXT_NTP_FLTR_HASH_MASK
;
7910 head
= &bp
->ntp_fltr_hash_tbl
[idx
];
7912 hlist_for_each_entry_rcu(fltr
, head
, hash
) {
7913 if (bnxt_fltr_match(fltr
, new_fltr
)) {
7921 spin_lock_bh(&bp
->ntp_fltr_lock
);
7922 bit_id
= bitmap_find_free_region(bp
->ntp_fltr_bmap
,
7923 BNXT_NTP_FLTR_MAX_FLTR
, 0);
7925 spin_unlock_bh(&bp
->ntp_fltr_lock
);
7930 new_fltr
->sw_id
= (u16
)bit_id
;
7931 new_fltr
->flow_id
= flow_id
;
7932 new_fltr
->l2_fltr_idx
= l2_idx
;
7933 new_fltr
->rxq
= rxq_index
;
7934 hlist_add_head_rcu(&new_fltr
->hash
, head
);
7935 bp
->ntp_fltr_count
++;
7936 spin_unlock_bh(&bp
->ntp_fltr_lock
);
7938 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT
, &bp
->sp_event
);
7939 bnxt_queue_sp_work(bp
);
7941 return new_fltr
->sw_id
;
7948 static void bnxt_cfg_ntp_filters(struct bnxt
*bp
)
7952 for (i
= 0; i
< BNXT_NTP_FLTR_HASH_SIZE
; i
++) {
7953 struct hlist_head
*head
;
7954 struct hlist_node
*tmp
;
7955 struct bnxt_ntuple_filter
*fltr
;
7958 head
= &bp
->ntp_fltr_hash_tbl
[i
];
7959 hlist_for_each_entry_safe(fltr
, tmp
, head
, hash
) {
7962 if (test_bit(BNXT_FLTR_VALID
, &fltr
->state
)) {
7963 if (rps_may_expire_flow(bp
->dev
, fltr
->rxq
,
7966 bnxt_hwrm_cfa_ntuple_filter_free(bp
,
7971 rc
= bnxt_hwrm_cfa_ntuple_filter_alloc(bp
,
7976 set_bit(BNXT_FLTR_VALID
, &fltr
->state
);
7980 spin_lock_bh(&bp
->ntp_fltr_lock
);
7981 hlist_del_rcu(&fltr
->hash
);
7982 bp
->ntp_fltr_count
--;
7983 spin_unlock_bh(&bp
->ntp_fltr_lock
);
7985 clear_bit(fltr
->sw_id
, bp
->ntp_fltr_bmap
);
7990 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT
, &bp
->sp_event
))
7991 netdev_info(bp
->dev
, "Receive PF driver unload event!");
7996 static void bnxt_cfg_ntp_filters(struct bnxt
*bp
)
8000 #endif /* CONFIG_RFS_ACCEL */
8002 static void bnxt_udp_tunnel_add(struct net_device
*dev
,
8003 struct udp_tunnel_info
*ti
)
8005 struct bnxt
*bp
= netdev_priv(dev
);
8007 if (ti
->sa_family
!= AF_INET6
&& ti
->sa_family
!= AF_INET
)
8010 if (!netif_running(dev
))
8014 case UDP_TUNNEL_TYPE_VXLAN
:
8015 if (bp
->vxlan_port_cnt
&& bp
->vxlan_port
!= ti
->port
)
8018 bp
->vxlan_port_cnt
++;
8019 if (bp
->vxlan_port_cnt
== 1) {
8020 bp
->vxlan_port
= ti
->port
;
8021 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT
, &bp
->sp_event
);
8022 bnxt_queue_sp_work(bp
);
8025 case UDP_TUNNEL_TYPE_GENEVE
:
8026 if (bp
->nge_port_cnt
&& bp
->nge_port
!= ti
->port
)
8030 if (bp
->nge_port_cnt
== 1) {
8031 bp
->nge_port
= ti
->port
;
8032 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT
, &bp
->sp_event
);
8039 bnxt_queue_sp_work(bp
);
8042 static void bnxt_udp_tunnel_del(struct net_device
*dev
,
8043 struct udp_tunnel_info
*ti
)
8045 struct bnxt
*bp
= netdev_priv(dev
);
8047 if (ti
->sa_family
!= AF_INET6
&& ti
->sa_family
!= AF_INET
)
8050 if (!netif_running(dev
))
8054 case UDP_TUNNEL_TYPE_VXLAN
:
8055 if (!bp
->vxlan_port_cnt
|| bp
->vxlan_port
!= ti
->port
)
8057 bp
->vxlan_port_cnt
--;
8059 if (bp
->vxlan_port_cnt
!= 0)
8062 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT
, &bp
->sp_event
);
8064 case UDP_TUNNEL_TYPE_GENEVE
:
8065 if (!bp
->nge_port_cnt
|| bp
->nge_port
!= ti
->port
)
8069 if (bp
->nge_port_cnt
!= 0)
8072 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT
, &bp
->sp_event
);
8078 bnxt_queue_sp_work(bp
);
8081 static int bnxt_bridge_getlink(struct sk_buff
*skb
, u32 pid
, u32 seq
,
8082 struct net_device
*dev
, u32 filter_mask
,
8085 struct bnxt
*bp
= netdev_priv(dev
);
8087 return ndo_dflt_bridge_getlink(skb
, pid
, seq
, dev
, bp
->br_mode
, 0, 0,
8088 nlflags
, filter_mask
, NULL
);
8091 static int bnxt_bridge_setlink(struct net_device
*dev
, struct nlmsghdr
*nlh
,
8094 struct bnxt
*bp
= netdev_priv(dev
);
8095 struct nlattr
*attr
, *br_spec
;
8098 if (bp
->hwrm_spec_code
< 0x10708 || !BNXT_SINGLE_PF(bp
))
8101 br_spec
= nlmsg_find_attr(nlh
, sizeof(struct ifinfomsg
), IFLA_AF_SPEC
);
8105 nla_for_each_nested(attr
, br_spec
, rem
) {
8108 if (nla_type(attr
) != IFLA_BRIDGE_MODE
)
8111 if (nla_len(attr
) < sizeof(mode
))
8114 mode
= nla_get_u16(attr
);
8115 if (mode
== bp
->br_mode
)
8118 rc
= bnxt_hwrm_set_br_mode(bp
, mode
);
8126 static int bnxt_get_phys_port_name(struct net_device
*dev
, char *buf
,
8129 struct bnxt
*bp
= netdev_priv(dev
);
8132 /* The PF and it's VF-reps only support the switchdev framework */
8136 rc
= snprintf(buf
, len
, "p%d", bp
->pf
.port_id
);
8143 int bnxt_port_attr_get(struct bnxt
*bp
, struct switchdev_attr
*attr
)
8145 if (bp
->eswitch_mode
!= DEVLINK_ESWITCH_MODE_SWITCHDEV
)
8148 /* The PF and it's VF-reps only support the switchdev framework */
8153 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID
:
8154 attr
->u
.ppid
.id_len
= sizeof(bp
->switch_id
);
8155 memcpy(attr
->u
.ppid
.id
, bp
->switch_id
, attr
->u
.ppid
.id_len
);
8163 static int bnxt_swdev_port_attr_get(struct net_device
*dev
,
8164 struct switchdev_attr
*attr
)
8166 return bnxt_port_attr_get(netdev_priv(dev
), attr
);
8169 static const struct switchdev_ops bnxt_switchdev_ops
= {
8170 .switchdev_port_attr_get
= bnxt_swdev_port_attr_get
8173 static const struct net_device_ops bnxt_netdev_ops
= {
8174 .ndo_open
= bnxt_open
,
8175 .ndo_start_xmit
= bnxt_start_xmit
,
8176 .ndo_stop
= bnxt_close
,
8177 .ndo_get_stats64
= bnxt_get_stats64
,
8178 .ndo_set_rx_mode
= bnxt_set_rx_mode
,
8179 .ndo_do_ioctl
= bnxt_ioctl
,
8180 .ndo_validate_addr
= eth_validate_addr
,
8181 .ndo_set_mac_address
= bnxt_change_mac_addr
,
8182 .ndo_change_mtu
= bnxt_change_mtu
,
8183 .ndo_fix_features
= bnxt_fix_features
,
8184 .ndo_set_features
= bnxt_set_features
,
8185 .ndo_tx_timeout
= bnxt_tx_timeout
,
8186 #ifdef CONFIG_BNXT_SRIOV
8187 .ndo_get_vf_config
= bnxt_get_vf_config
,
8188 .ndo_set_vf_mac
= bnxt_set_vf_mac
,
8189 .ndo_set_vf_vlan
= bnxt_set_vf_vlan
,
8190 .ndo_set_vf_rate
= bnxt_set_vf_bw
,
8191 .ndo_set_vf_link_state
= bnxt_set_vf_link_state
,
8192 .ndo_set_vf_spoofchk
= bnxt_set_vf_spoofchk
,
8194 #ifdef CONFIG_NET_POLL_CONTROLLER
8195 .ndo_poll_controller
= bnxt_poll_controller
,
8197 .ndo_setup_tc
= bnxt_setup_tc
,
8198 #ifdef CONFIG_RFS_ACCEL
8199 .ndo_rx_flow_steer
= bnxt_rx_flow_steer
,
8201 .ndo_udp_tunnel_add
= bnxt_udp_tunnel_add
,
8202 .ndo_udp_tunnel_del
= bnxt_udp_tunnel_del
,
8203 .ndo_bpf
= bnxt_xdp
,
8204 .ndo_bridge_getlink
= bnxt_bridge_getlink
,
8205 .ndo_bridge_setlink
= bnxt_bridge_setlink
,
8206 .ndo_get_phys_port_name
= bnxt_get_phys_port_name
8209 static void bnxt_remove_one(struct pci_dev
*pdev
)
8211 struct net_device
*dev
= pci_get_drvdata(pdev
);
8212 struct bnxt
*bp
= netdev_priv(dev
);
8215 bnxt_sriov_disable(bp
);
8216 bnxt_dl_unregister(bp
);
8219 pci_disable_pcie_error_reporting(pdev
);
8220 unregister_netdev(dev
);
8221 bnxt_shutdown_tc(bp
);
8222 bnxt_cancel_sp_work(bp
);
8225 bnxt_clear_int_mode(bp
);
8226 bnxt_hwrm_func_drv_unrgtr(bp
);
8227 bnxt_free_hwrm_resources(bp
);
8228 bnxt_free_hwrm_short_cmd_req(bp
);
8229 bnxt_ethtool_free(bp
);
8233 bnxt_cleanup_pci(bp
);
8237 static int bnxt_probe_phy(struct bnxt
*bp
)
8240 struct bnxt_link_info
*link_info
= &bp
->link_info
;
8242 rc
= bnxt_hwrm_phy_qcaps(bp
);
8244 netdev_err(bp
->dev
, "Probe phy can't get phy capabilities (rc: %x)\n",
8248 mutex_init(&bp
->link_lock
);
8250 rc
= bnxt_update_link(bp
, false);
8252 netdev_err(bp
->dev
, "Probe phy can't update link (rc: %x)\n",
8257 /* Older firmware does not have supported_auto_speeds, so assume
8258 * that all supported speeds can be autonegotiated.
8260 if (link_info
->auto_link_speeds
&& !link_info
->support_auto_speeds
)
8261 link_info
->support_auto_speeds
= link_info
->support_speeds
;
8263 /*initialize the ethool setting copy with NVM settings */
8264 if (BNXT_AUTO_MODE(link_info
->auto_mode
)) {
8265 link_info
->autoneg
= BNXT_AUTONEG_SPEED
;
8266 if (bp
->hwrm_spec_code
>= 0x10201) {
8267 if (link_info
->auto_pause_setting
&
8268 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE
)
8269 link_info
->autoneg
|= BNXT_AUTONEG_FLOW_CTRL
;
8271 link_info
->autoneg
|= BNXT_AUTONEG_FLOW_CTRL
;
8273 link_info
->advertising
= link_info
->auto_link_speeds
;
8275 link_info
->req_link_speed
= link_info
->force_link_speed
;
8276 link_info
->req_duplex
= link_info
->duplex_setting
;
8278 if (link_info
->autoneg
& BNXT_AUTONEG_FLOW_CTRL
)
8279 link_info
->req_flow_ctrl
=
8280 link_info
->auto_pause_setting
& BNXT_LINK_PAUSE_BOTH
;
8282 link_info
->req_flow_ctrl
= link_info
->force_pause_setting
;
8286 static int bnxt_get_max_irq(struct pci_dev
*pdev
)
8290 if (!pdev
->msix_cap
)
8293 pci_read_config_word(pdev
, pdev
->msix_cap
+ PCI_MSIX_FLAGS
, &ctrl
);
8294 return (ctrl
& PCI_MSIX_FLAGS_QSIZE
) + 1;
8297 static void _bnxt_get_max_rings(struct bnxt
*bp
, int *max_rx
, int *max_tx
,
8300 struct bnxt_hw_resc
*hw_resc
= &bp
->hw_resc
;
8301 int max_ring_grps
= 0;
8303 *max_tx
= hw_resc
->max_tx_rings
;
8304 *max_rx
= hw_resc
->max_rx_rings
;
8305 *max_cp
= min_t(int, hw_resc
->max_irqs
, hw_resc
->max_cp_rings
);
8306 *max_cp
= min_t(int, *max_cp
, hw_resc
->max_stat_ctxs
);
8307 max_ring_grps
= hw_resc
->max_hw_ring_grps
;
8308 if (BNXT_CHIP_TYPE_NITRO_A0(bp
) && BNXT_PF(bp
)) {
8312 if (bp
->flags
& BNXT_FLAG_AGG_RINGS
)
8314 *max_rx
= min_t(int, *max_rx
, max_ring_grps
);
8317 int bnxt_get_max_rings(struct bnxt
*bp
, int *max_rx
, int *max_tx
, bool shared
)
8321 _bnxt_get_max_rings(bp
, &rx
, &tx
, &cp
);
8322 if (!rx
|| !tx
|| !cp
)
8327 return bnxt_trim_rings(bp
, max_rx
, max_tx
, cp
, shared
);
8330 static int bnxt_get_dflt_rings(struct bnxt
*bp
, int *max_rx
, int *max_tx
,
8335 rc
= bnxt_get_max_rings(bp
, max_rx
, max_tx
, shared
);
8336 if (rc
&& (bp
->flags
& BNXT_FLAG_AGG_RINGS
)) {
8337 /* Not enough rings, try disabling agg rings. */
8338 bp
->flags
&= ~BNXT_FLAG_AGG_RINGS
;
8339 rc
= bnxt_get_max_rings(bp
, max_rx
, max_tx
, shared
);
8342 bp
->flags
|= BNXT_FLAG_NO_AGG_RINGS
;
8343 bp
->dev
->hw_features
&= ~(NETIF_F_LRO
| NETIF_F_GRO_HW
);
8344 bp
->dev
->features
&= ~(NETIF_F_LRO
| NETIF_F_GRO_HW
);
8345 bnxt_set_ring_params(bp
);
8348 if (bp
->flags
& BNXT_FLAG_ROCE_CAP
) {
8349 int max_cp
, max_stat
, max_irq
;
8351 /* Reserve minimum resources for RoCE */
8352 max_cp
= bnxt_get_max_func_cp_rings(bp
);
8353 max_stat
= bnxt_get_max_func_stat_ctxs(bp
);
8354 max_irq
= bnxt_get_max_func_irqs(bp
);
8355 if (max_cp
<= BNXT_MIN_ROCE_CP_RINGS
||
8356 max_irq
<= BNXT_MIN_ROCE_CP_RINGS
||
8357 max_stat
<= BNXT_MIN_ROCE_STAT_CTXS
)
8360 max_cp
-= BNXT_MIN_ROCE_CP_RINGS
;
8361 max_irq
-= BNXT_MIN_ROCE_CP_RINGS
;
8362 max_stat
-= BNXT_MIN_ROCE_STAT_CTXS
;
8363 max_cp
= min_t(int, max_cp
, max_irq
);
8364 max_cp
= min_t(int, max_cp
, max_stat
);
8365 rc
= bnxt_trim_rings(bp
, max_rx
, max_tx
, max_cp
, shared
);
8372 /* In initial default shared ring setting, each shared ring must have a
8375 static void bnxt_trim_dflt_sh_rings(struct bnxt
*bp
)
8377 bp
->cp_nr_rings
= min_t(int, bp
->tx_nr_rings_per_tc
, bp
->rx_nr_rings
);
8378 bp
->rx_nr_rings
= bp
->cp_nr_rings
;
8379 bp
->tx_nr_rings_per_tc
= bp
->cp_nr_rings
;
8380 bp
->tx_nr_rings
= bp
->tx_nr_rings_per_tc
;
8383 static int bnxt_set_dflt_rings(struct bnxt
*bp
, bool sh
)
8385 int dflt_rings
, max_rx_rings
, max_tx_rings
, rc
;
8388 bp
->flags
|= BNXT_FLAG_SHARED_RINGS
;
8389 dflt_rings
= netif_get_num_default_rss_queues();
8390 /* Reduce default rings to reduce memory usage on multi-port cards */
8391 if (bp
->port_count
> 1)
8392 dflt_rings
= min_t(int, dflt_rings
, 4);
8393 rc
= bnxt_get_dflt_rings(bp
, &max_rx_rings
, &max_tx_rings
, sh
);
8396 bp
->rx_nr_rings
= min_t(int, dflt_rings
, max_rx_rings
);
8397 bp
->tx_nr_rings_per_tc
= min_t(int, dflt_rings
, max_tx_rings
);
8399 bnxt_trim_dflt_sh_rings(bp
);
8401 bp
->cp_nr_rings
= bp
->tx_nr_rings_per_tc
+ bp
->rx_nr_rings
;
8402 bp
->tx_nr_rings
= bp
->tx_nr_rings_per_tc
;
8404 rc
= __bnxt_reserve_rings(bp
);
8406 netdev_warn(bp
->dev
, "Unable to reserve tx rings\n");
8407 bp
->tx_nr_rings_per_tc
= bp
->tx_nr_rings
;
8409 bnxt_trim_dflt_sh_rings(bp
);
8411 /* Rings may have been trimmed, re-reserve the trimmed rings. */
8412 if (bnxt_need_reserve_rings(bp
)) {
8413 rc
= __bnxt_reserve_rings(bp
);
8415 netdev_warn(bp
->dev
, "2nd rings reservation failed.\n");
8416 bp
->tx_nr_rings_per_tc
= bp
->tx_nr_rings
;
8418 bp
->num_stat_ctxs
= bp
->cp_nr_rings
;
8419 if (BNXT_CHIP_TYPE_NITRO_A0(bp
)) {
8426 int bnxt_restore_pf_fw_resources(struct bnxt
*bp
)
8431 if (bnxt_ulp_registered(bp
->edev
, BNXT_ROCE_ULP
))
8434 bnxt_hwrm_func_qcaps(bp
);
8435 __bnxt_close_nic(bp
, true, false);
8436 bnxt_clear_int_mode(bp
);
8437 rc
= bnxt_init_int_mode(bp
);
8441 rc
= bnxt_open_nic(bp
, true, false);
8445 static int bnxt_init_mac_addr(struct bnxt
*bp
)
8450 memcpy(bp
->dev
->dev_addr
, bp
->pf
.mac_addr
, ETH_ALEN
);
8452 #ifdef CONFIG_BNXT_SRIOV
8453 struct bnxt_vf_info
*vf
= &bp
->vf
;
8455 if (is_valid_ether_addr(vf
->mac_addr
)) {
8456 /* overwrite netdev dev_addr with admin VF MAC */
8457 memcpy(bp
->dev
->dev_addr
, vf
->mac_addr
, ETH_ALEN
);
8459 eth_hw_addr_random(bp
->dev
);
8460 rc
= bnxt_approve_mac(bp
, bp
->dev
->dev_addr
);
8467 static void bnxt_parse_log_pcie_link(struct bnxt
*bp
)
8469 enum pcie_link_width width
= PCIE_LNK_WIDTH_UNKNOWN
;
8470 enum pci_bus_speed speed
= PCI_SPEED_UNKNOWN
;
8472 if (pcie_get_minimum_link(pci_physfn(bp
->pdev
), &speed
, &width
) ||
8473 speed
== PCI_SPEED_UNKNOWN
|| width
== PCIE_LNK_WIDTH_UNKNOWN
)
8474 netdev_info(bp
->dev
, "Failed to determine PCIe Link Info\n");
8476 netdev_info(bp
->dev
, "PCIe: Speed %s Width x%d\n",
8477 speed
== PCIE_SPEED_2_5GT
? "2.5GT/s" :
8478 speed
== PCIE_SPEED_5_0GT
? "5.0GT/s" :
8479 speed
== PCIE_SPEED_8_0GT
? "8.0GT/s" :
8483 static int bnxt_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
8485 static int version_printed
;
8486 struct net_device
*dev
;
8490 if (pci_is_bridge(pdev
))
8493 if (version_printed
++ == 0)
8494 pr_info("%s", version
);
8496 max_irqs
= bnxt_get_max_irq(pdev
);
8497 dev
= alloc_etherdev_mq(sizeof(*bp
), max_irqs
);
8501 bp
= netdev_priv(dev
);
8503 if (bnxt_vf_pciid(ent
->driver_data
))
8504 bp
->flags
|= BNXT_FLAG_VF
;
8507 bp
->flags
|= BNXT_FLAG_MSIX_CAP
;
8509 rc
= bnxt_init_board(pdev
, dev
);
8513 dev
->netdev_ops
= &bnxt_netdev_ops
;
8514 dev
->watchdog_timeo
= BNXT_TX_TIMEOUT
;
8515 dev
->ethtool_ops
= &bnxt_ethtool_ops
;
8516 SWITCHDEV_SET_OPS(dev
, &bnxt_switchdev_ops
);
8517 pci_set_drvdata(pdev
, dev
);
8519 rc
= bnxt_alloc_hwrm_resources(bp
);
8521 goto init_err_pci_clean
;
8523 mutex_init(&bp
->hwrm_cmd_lock
);
8524 rc
= bnxt_hwrm_ver_get(bp
);
8526 goto init_err_pci_clean
;
8528 if (bp
->flags
& BNXT_FLAG_SHORT_CMD
) {
8529 rc
= bnxt_alloc_hwrm_short_cmd_req(bp
);
8531 goto init_err_pci_clean
;
8534 rc
= bnxt_hwrm_func_reset(bp
);
8536 goto init_err_pci_clean
;
8538 bnxt_hwrm_fw_set_time(bp
);
8540 dev
->hw_features
= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
| NETIF_F_SG
|
8541 NETIF_F_TSO
| NETIF_F_TSO6
|
8542 NETIF_F_GSO_UDP_TUNNEL
| NETIF_F_GSO_GRE
|
8543 NETIF_F_GSO_IPXIP4
|
8544 NETIF_F_GSO_UDP_TUNNEL_CSUM
| NETIF_F_GSO_GRE_CSUM
|
8545 NETIF_F_GSO_PARTIAL
| NETIF_F_RXHASH
|
8546 NETIF_F_RXCSUM
| NETIF_F_GRO
;
8548 if (!BNXT_CHIP_TYPE_NITRO_A0(bp
))
8549 dev
->hw_features
|= NETIF_F_LRO
;
8551 dev
->hw_enc_features
=
8552 NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
| NETIF_F_SG
|
8553 NETIF_F_TSO
| NETIF_F_TSO6
|
8554 NETIF_F_GSO_UDP_TUNNEL
| NETIF_F_GSO_GRE
|
8555 NETIF_F_GSO_UDP_TUNNEL_CSUM
| NETIF_F_GSO_GRE_CSUM
|
8556 NETIF_F_GSO_IPXIP4
| NETIF_F_GSO_PARTIAL
;
8557 dev
->gso_partial_features
= NETIF_F_GSO_UDP_TUNNEL_CSUM
|
8558 NETIF_F_GSO_GRE_CSUM
;
8559 dev
->vlan_features
= dev
->hw_features
| NETIF_F_HIGHDMA
;
8560 dev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_RX
| NETIF_F_HW_VLAN_CTAG_TX
|
8561 NETIF_F_HW_VLAN_STAG_RX
| NETIF_F_HW_VLAN_STAG_TX
;
8562 if (!BNXT_CHIP_TYPE_NITRO_A0(bp
))
8563 dev
->hw_features
|= NETIF_F_GRO_HW
;
8564 dev
->features
|= dev
->hw_features
| NETIF_F_HIGHDMA
;
8565 if (dev
->features
& NETIF_F_GRO_HW
)
8566 dev
->features
&= ~NETIF_F_LRO
;
8567 dev
->priv_flags
|= IFF_UNICAST_FLT
;
8569 #ifdef CONFIG_BNXT_SRIOV
8570 init_waitqueue_head(&bp
->sriov_cfg_wait
);
8571 mutex_init(&bp
->sriov_lock
);
8573 bp
->gro_func
= bnxt_gro_func_5730x
;
8574 if (BNXT_CHIP_P4_PLUS(bp
))
8575 bp
->gro_func
= bnxt_gro_func_5731x
;
8577 bp
->flags
|= BNXT_FLAG_DOUBLE_DB
;
8579 rc
= bnxt_hwrm_func_drv_rgtr(bp
);
8581 goto init_err_pci_clean
;
8583 rc
= bnxt_hwrm_func_rgtr_async_events(bp
, NULL
, 0);
8585 goto init_err_pci_clean
;
8587 bp
->ulp_probe
= bnxt_ulp_probe
;
8589 /* Get the MAX capabilities for this function */
8590 rc
= bnxt_hwrm_func_qcaps(bp
);
8592 netdev_err(bp
->dev
, "hwrm query capability failure rc: %x\n",
8595 goto init_err_pci_clean
;
8597 rc
= bnxt_init_mac_addr(bp
);
8599 dev_err(&pdev
->dev
, "Unable to initialize mac address.\n");
8600 rc
= -EADDRNOTAVAIL
;
8601 goto init_err_pci_clean
;
8603 rc
= bnxt_hwrm_queue_qportcfg(bp
);
8605 netdev_err(bp
->dev
, "hwrm query qportcfg failure rc: %x\n",
8608 goto init_err_pci_clean
;
8611 bnxt_hwrm_func_qcfg(bp
);
8612 bnxt_hwrm_port_led_qcaps(bp
);
8613 bnxt_ethtool_init(bp
);
8616 /* MTU range: 60 - FW defined max */
8617 dev
->min_mtu
= ETH_ZLEN
;
8618 dev
->max_mtu
= bp
->max_mtu
;
8620 rc
= bnxt_probe_phy(bp
);
8622 goto init_err_pci_clean
;
8624 bnxt_set_rx_skb_mode(bp
, false);
8625 bnxt_set_tpa_flags(bp
);
8626 bnxt_set_ring_params(bp
);
8627 bnxt_set_max_func_irqs(bp
, max_irqs
);
8628 rc
= bnxt_set_dflt_rings(bp
, true);
8630 netdev_err(bp
->dev
, "Not enough rings available.\n");
8632 goto init_err_pci_clean
;
8635 /* Default RSS hash cfg. */
8636 bp
->rss_hash_cfg
= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4
|
8637 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4
|
8638 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6
|
8639 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6
;
8640 if (BNXT_CHIP_P4_PLUS(bp
) && bp
->hwrm_spec_code
>= 0x10501) {
8641 bp
->flags
|= BNXT_FLAG_UDP_RSS_CAP
;
8642 bp
->rss_hash_cfg
|= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4
|
8643 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6
;
8646 bnxt_hwrm_vnic_qcaps(bp
);
8647 if (bnxt_rfs_supported(bp
)) {
8648 dev
->hw_features
|= NETIF_F_NTUPLE
;
8649 if (bnxt_rfs_capable(bp
)) {
8650 bp
->flags
|= BNXT_FLAG_RFS
;
8651 dev
->features
|= NETIF_F_NTUPLE
;
8655 if (dev
->hw_features
& NETIF_F_HW_VLAN_CTAG_RX
)
8656 bp
->flags
|= BNXT_FLAG_STRIP_VLAN
;
8658 rc
= bnxt_init_int_mode(bp
);
8660 goto init_err_pci_clean
;
8662 /* No TC has been set yet and rings may have been trimmed due to
8663 * limited MSIX, so we re-initialize the TX rings per TC.
8665 bp
->tx_nr_rings_per_tc
= bp
->tx_nr_rings
;
8667 bnxt_get_wol_settings(bp
);
8668 if (bp
->flags
& BNXT_FLAG_WOL_CAP
)
8669 device_set_wakeup_enable(&pdev
->dev
, bp
->wol
);
8671 device_set_wakeup_capable(&pdev
->dev
, false);
8673 bnxt_hwrm_set_cache_line_size(bp
, cache_line_size());
8678 create_singlethread_workqueue("bnxt_pf_wq");
8680 dev_err(&pdev
->dev
, "Unable to create workqueue.\n");
8681 goto init_err_pci_clean
;
8687 rc
= register_netdev(dev
);
8689 goto init_err_cleanup_tc
;
8692 bnxt_dl_register(bp
);
8694 netdev_info(dev
, "%s found at mem %lx, node addr %pM\n",
8695 board_info
[ent
->driver_data
].name
,
8696 (long)pci_resource_start(pdev
, 0), dev
->dev_addr
);
8698 bnxt_parse_log_pcie_link(bp
);
8702 init_err_cleanup_tc
:
8703 bnxt_shutdown_tc(bp
);
8704 bnxt_clear_int_mode(bp
);
8707 bnxt_cleanup_pci(bp
);
8714 static void bnxt_shutdown(struct pci_dev
*pdev
)
8716 struct net_device
*dev
= pci_get_drvdata(pdev
);
8723 bp
= netdev_priv(dev
);
8727 if (netif_running(dev
))
8730 bnxt_ulp_shutdown(bp
);
8732 if (system_state
== SYSTEM_POWER_OFF
) {
8733 bnxt_clear_int_mode(bp
);
8734 pci_wake_from_d3(pdev
, bp
->wol
);
8735 pci_set_power_state(pdev
, PCI_D3hot
);
8742 #ifdef CONFIG_PM_SLEEP
8743 static int bnxt_suspend(struct device
*device
)
8745 struct pci_dev
*pdev
= to_pci_dev(device
);
8746 struct net_device
*dev
= pci_get_drvdata(pdev
);
8747 struct bnxt
*bp
= netdev_priv(dev
);
8751 if (netif_running(dev
)) {
8752 netif_device_detach(dev
);
8753 rc
= bnxt_close(dev
);
8755 bnxt_hwrm_func_drv_unrgtr(bp
);
8760 static int bnxt_resume(struct device
*device
)
8762 struct pci_dev
*pdev
= to_pci_dev(device
);
8763 struct net_device
*dev
= pci_get_drvdata(pdev
);
8764 struct bnxt
*bp
= netdev_priv(dev
);
8768 if (bnxt_hwrm_ver_get(bp
) || bnxt_hwrm_func_drv_rgtr(bp
)) {
8772 rc
= bnxt_hwrm_func_reset(bp
);
8777 bnxt_get_wol_settings(bp
);
8778 if (netif_running(dev
)) {
8779 rc
= bnxt_open(dev
);
8781 netif_device_attach(dev
);
8789 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops
, bnxt_suspend
, bnxt_resume
);
8790 #define BNXT_PM_OPS (&bnxt_pm_ops)
8794 #define BNXT_PM_OPS NULL
8796 #endif /* CONFIG_PM_SLEEP */
8799 * bnxt_io_error_detected - called when PCI error is detected
8800 * @pdev: Pointer to PCI device
8801 * @state: The current pci connection state
8803 * This function is called after a PCI bus error affecting
8804 * this device has been detected.
8806 static pci_ers_result_t
bnxt_io_error_detected(struct pci_dev
*pdev
,
8807 pci_channel_state_t state
)
8809 struct net_device
*netdev
= pci_get_drvdata(pdev
);
8810 struct bnxt
*bp
= netdev_priv(netdev
);
8812 netdev_info(netdev
, "PCI I/O error detected\n");
8815 netif_device_detach(netdev
);
8819 if (state
== pci_channel_io_perm_failure
) {
8821 return PCI_ERS_RESULT_DISCONNECT
;
8824 if (netif_running(netdev
))
8827 pci_disable_device(pdev
);
8830 /* Request a slot slot reset. */
8831 return PCI_ERS_RESULT_NEED_RESET
;
8835 * bnxt_io_slot_reset - called after the pci bus has been reset.
8836 * @pdev: Pointer to PCI device
8838 * Restart the card from scratch, as if from a cold-boot.
8839 * At this point, the card has exprienced a hard reset,
8840 * followed by fixups by BIOS, and has its config space
8841 * set up identically to what it was at cold boot.
8843 static pci_ers_result_t
bnxt_io_slot_reset(struct pci_dev
*pdev
)
8845 struct net_device
*netdev
= pci_get_drvdata(pdev
);
8846 struct bnxt
*bp
= netdev_priv(netdev
);
8848 pci_ers_result_t result
= PCI_ERS_RESULT_DISCONNECT
;
8850 netdev_info(bp
->dev
, "PCI Slot Reset\n");
8854 if (pci_enable_device(pdev
)) {
8856 "Cannot re-enable PCI device after reset.\n");
8858 pci_set_master(pdev
);
8860 err
= bnxt_hwrm_func_reset(bp
);
8861 if (!err
&& netif_running(netdev
))
8862 err
= bnxt_open(netdev
);
8865 result
= PCI_ERS_RESULT_RECOVERED
;
8870 if (result
!= PCI_ERS_RESULT_RECOVERED
&& netif_running(netdev
))
8875 err
= pci_cleanup_aer_uncorrect_error_status(pdev
);
8878 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8879 err
); /* non-fatal, continue */
8882 return PCI_ERS_RESULT_RECOVERED
;
8886 * bnxt_io_resume - called when traffic can start flowing again.
8887 * @pdev: Pointer to PCI device
8889 * This callback is called when the error recovery driver tells
8890 * us that its OK to resume normal operation.
8892 static void bnxt_io_resume(struct pci_dev
*pdev
)
8894 struct net_device
*netdev
= pci_get_drvdata(pdev
);
8898 netif_device_attach(netdev
);
8903 static const struct pci_error_handlers bnxt_err_handler
= {
8904 .error_detected
= bnxt_io_error_detected
,
8905 .slot_reset
= bnxt_io_slot_reset
,
8906 .resume
= bnxt_io_resume
8909 static struct pci_driver bnxt_pci_driver
= {
8910 .name
= DRV_MODULE_NAME
,
8911 .id_table
= bnxt_pci_tbl
,
8912 .probe
= bnxt_init_one
,
8913 .remove
= bnxt_remove_one
,
8914 .shutdown
= bnxt_shutdown
,
8915 .driver
.pm
= BNXT_PM_OPS
,
8916 .err_handler
= &bnxt_err_handler
,
8917 #if defined(CONFIG_BNXT_SRIOV)
8918 .sriov_configure
= bnxt_sriov_configure
,
8922 static int __init
bnxt_init(void)
8924 return pci_register_driver(&bnxt_pci_driver
);
8927 static void __exit
bnxt_exit(void)
8929 pci_unregister_driver(&bnxt_pci_driver
);
8931 destroy_workqueue(bnxt_pf_wq
);
8934 module_init(bnxt_init
);
8935 module_exit(bnxt_exit
);