/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM57504,
	BCM57502,
	BCM57508_NPAR,
	BCM57504_NPAR,
	BCM57502_NPAR,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
	NETXTREME_E_P5_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db) \
	writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx) \
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx) \
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx) \
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx) \
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}
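
/* The doorbell helpers above pick the format at run time: P5
 * (57500-series) chips take a 64-bit doorbell value built from db_key64
 * plus a DBR_TYPE_* code, while older chips use the 32-bit DB_KEY_CP
 * layout. Both variants encode the ring index through RING_CMP().
 */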

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
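
/* bnxt_lhint_arr is indexed by the packet length in 512-byte units
 * (length >> 9 in bnxt_start_xmit()); e.g. a 1500-byte frame yields
 * index 2 and therefore TX_BD_FLAGS_LHINT_1024_TO_2047.
 */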

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 802.1Q and 802.1ad VLAN offloads;
		 * the QinQ1, QinQ2, QinQ3 VLAN headers are deprecated.
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
		cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
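
/* bnxt_start_xmit() above has two paths: if the TX ring is empty and
 * the packet fits within tx_push_thresh, the whole packet is written
 * through the doorbell BAR ("push" mode), avoiding a DMA read by the
 * NIC; otherwise the head and each fragment are DMA-mapped into a
 * chain of long TX BDs and completed later in bnxt_tx_int().
 */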

static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(&pdev->dev,
				       dma_unmap_addr(tx_buf, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[j]),
				       PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}
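
/* Note that when PAGE_SIZE is larger than BNXT_RX_PAGE_SIZE (e.g. 64K
 * pages with 4K hardware buffers), bnxt_alloc_rx_page() above carves
 * one page into several aggregation buffers, taking an extra page
 * reference for every slice still handed to the ring.
 */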

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}
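
/* bnxt_rx_page_skb() above builds a header-split skb: the header bytes
 * (the payload hint from the completion, or an eth_get_headlen()
 * estimate when the hint is zero) are copied into the linear area, and
 * frag 0 is trimmed so the rest of the packet stays in the page.
 */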

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 idx,
				     u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}
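
/* bnxt_copy_skb() serves receives no larger than bp->rx_copy_thresh:
 * copying into a fresh skb lets the caller reuse the original rx
 * buffer in place instead of unmapping and reallocating it.
 */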

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_cancel_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		flush_workqueue(bnxt_pf_wq);
	else
		cancel_work_sync(&bp->sp_task);
}
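
/* As the helpers above show, slow-path work runs on the dedicated
 * bnxt_pf_wq workqueue for PF devices and on the shared system
 * workqueue for VFs.
 */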

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

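/* The helpers below remap hardware-assigned TPA aggregation IDs onto
 * free driver-side indices via agg_idx_bmap and remember the
 * translation in agg_id_tbl, since on P5 chips the hardware ID space
 * need not match the size of the driver's rx_tpa[] array.
 */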
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
			RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6. If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract 4 from all offsets */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}
1499
1500 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1501 struct bnxt_cp_ring_info *cpr,
1502 u32 *raw_cons,
1503 struct rx_tpa_end_cmp *tpa_end,
1504 struct rx_tpa_end_cmp_ext *tpa_end1,
1505 u8 *event)
1506 {
1507 struct bnxt_napi *bnapi = cpr->bnapi;
1508 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1509 u8 *data_ptr, agg_bufs;
1510 unsigned int len;
1511 struct bnxt_tpa_info *tpa_info;
1512 dma_addr_t mapping;
1513 struct sk_buff *skb;
1514 u16 idx = 0, agg_id;
1515 void *data;
1516 bool gro;
1517
1518 if (unlikely(bnapi->in_reset)) {
1519 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1520
1521 if (rc < 0)
1522 return ERR_PTR(-EBUSY);
1523 return NULL;
1524 }
1525
1526 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1527 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1528 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1529 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1530 tpa_info = &rxr->rx_tpa[agg_id];
1531 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1532 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1533 agg_bufs, tpa_info->agg_count);
1534 agg_bufs = tpa_info->agg_count;
1535 }
1536 tpa_info->agg_count = 0;
1537 *event |= BNXT_AGG_EVENT;
1538 bnxt_free_agg_idx(rxr, agg_id);
1539 idx = agg_id;
1540 gro = !!(bp->flags & BNXT_FLAG_GRO);
1541 } else {
1542 agg_id = TPA_END_AGG_ID(tpa_end);
1543 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1544 tpa_info = &rxr->rx_tpa[agg_id];
1545 idx = RING_CMP(*raw_cons);
1546 if (agg_bufs) {
1547 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1548 return ERR_PTR(-EBUSY);
1549
1550 *event |= BNXT_AGG_EVENT;
1551 idx = NEXT_CMP(idx);
1552 }
1553 gro = !!TPA_END_GRO(tpa_end);
1554 }
1555 data = tpa_info->data;
1556 data_ptr = tpa_info->data_ptr;
1557 prefetch(data_ptr);
1558 len = tpa_info->len;
1559 mapping = tpa_info->mapping;
1560
1561 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1562 bnxt_abort_tpa(cpr, idx, agg_bufs);
1563 if (agg_bufs > MAX_SKB_FRAGS)
1564 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1565 agg_bufs, (int)MAX_SKB_FRAGS);
1566 return NULL;
1567 }
1568
1569 if (len <= bp->rx_copy_thresh) {
1570 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1571 if (!skb) {
1572 bnxt_abort_tpa(cpr, idx, agg_bufs);
1573 return NULL;
1574 }
1575 } else {
1576 u8 *new_data;
1577 dma_addr_t new_mapping;
1578
1579 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1580 if (!new_data) {
1581 bnxt_abort_tpa(cpr, idx, agg_bufs);
1582 return NULL;
1583 }
1584
1585 tpa_info->data = new_data;
1586 tpa_info->data_ptr = new_data + bp->rx_offset;
1587 tpa_info->mapping = new_mapping;
1588
1589 skb = build_skb(data, 0);
1590 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1591 bp->rx_buf_use_size, bp->rx_dir,
1592 DMA_ATTR_WEAK_ORDERING);
1593
1594 if (!skb) {
1595 kfree(data);
1596 bnxt_abort_tpa(cpr, idx, agg_bufs);
1597 return NULL;
1598 }
1599 skb_reserve(skb, bp->rx_offset);
1600 skb_put(skb, len);
1601 }
1602
1603 if (agg_bufs) {
1604 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1605 if (!skb) {
1606 /* Page reuse already handled by bnxt_rx_pages(). */
1607 return NULL;
1608 }
1609 }
1610
1611 skb->protocol =
1612 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1613
1614 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1615 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1616
1617 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1618 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1619 u16 vlan_proto = tpa_info->metadata >>
1620 RX_CMP_FLAGS2_METADATA_TPID_SFT;
1621 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1622
1623 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1624 }
1625
1626 skb_checksum_none_assert(skb);
1627 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1628 skb->ip_summed = CHECKSUM_UNNECESSARY;
1629 skb->csum_level =
1630 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1631 }
1632
1633 if (gro)
1634 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1635
1636 return skb;
1637 }
1638
1639 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1640 struct rx_agg_cmp *rx_agg)
1641 {
1642 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1643 struct bnxt_tpa_info *tpa_info;
1644
1645 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1646 tpa_info = &rxr->rx_tpa[agg_id];
1647 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1648 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1649 }
1650
1651 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1652 struct sk_buff *skb)
1653 {
1654 if (skb->dev != bp->dev) {
1655 /* this packet belongs to a vf-rep */
1656 bnxt_vf_rep_rx(bp, skb);
1657 return;
1658 }
1659 skb_record_rx_queue(skb, bnapi->index);
1660 napi_gro_receive(&bnapi->napi, skb);
1661 }
1662
1663 /* returns the following:
1664 * 1 - 1 packet successfully received
1665 * 0 - successful TPA_START, packet not completed yet
1666 * -EBUSY - completion ring does not have all the agg buffers yet
1667 * -ENOMEM - packet aborted due to out of memory
1668 * -EIO - packet aborted due to hw error indicated in BD
1669 */
1670 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1671 u32 *raw_cons, u8 *event)
1672 {
1673 struct bnxt_napi *bnapi = cpr->bnapi;
1674 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1675 struct net_device *dev = bp->dev;
1676 struct rx_cmp *rxcmp;
1677 struct rx_cmp_ext *rxcmp1;
1678 u32 tmp_raw_cons = *raw_cons;
1679 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1680 struct bnxt_sw_rx_bd *rx_buf;
1681 unsigned int len;
1682 u8 *data_ptr, agg_bufs, cmp_type;
1683 dma_addr_t dma_addr;
1684 struct sk_buff *skb;
1685 void *data;
1686 int rc = 0;
1687 u32 misc;
1688
1689 rxcmp = (struct rx_cmp *)
1690 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1691
1692 cmp_type = RX_CMP_TYPE(rxcmp);
1693
1694 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1695 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1696 goto next_rx_no_prod_no_len;
1697 }
1698
1699 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1700 cp_cons = RING_CMP(tmp_raw_cons);
1701 rxcmp1 = (struct rx_cmp_ext *)
1702 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1703
1704 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1705 return -EBUSY;
1706
1707 prod = rxr->rx_prod;
1708
1709 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1710 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1711 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1712
1713 *event |= BNXT_RX_EVENT;
1714 goto next_rx_no_prod_no_len;
1715
1716 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1717 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1718 (struct rx_tpa_end_cmp *)rxcmp,
1719 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1720
1721 if (IS_ERR(skb))
1722 return -EBUSY;
1723
1724 rc = -ENOMEM;
1725 if (likely(skb)) {
1726 bnxt_deliver_skb(bp, bnapi, skb);
1727 rc = 1;
1728 }
1729 *event |= BNXT_RX_EVENT;
1730 goto next_rx_no_prod_no_len;
1731 }
1732
1733 cons = rxcmp->rx_cmp_opaque;
1734 if (unlikely(cons != rxr->rx_next_cons)) {
1735 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1736
1737 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1738 cons, rxr->rx_next_cons);
1739 bnxt_sched_reset(bp, rxr);
1740 return rc1;
1741 }
1742 rx_buf = &rxr->rx_buf_ring[cons];
1743 data = rx_buf->data;
1744 data_ptr = rx_buf->data_ptr;
1745 prefetch(data_ptr);
1746
1747 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1748 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1749
1750 if (agg_bufs) {
1751 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1752 return -EBUSY;
1753
1754 cp_cons = NEXT_CMP(cp_cons);
1755 *event |= BNXT_AGG_EVENT;
1756 }
1757 *event |= BNXT_RX_EVENT;
1758
1759 rx_buf->data = NULL;
1760 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1761 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1762
1763 bnxt_reuse_rx_data(rxr, cons, data);
1764 if (agg_bufs)
1765 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1766 false);
1767
1768 rc = -EIO;
1769 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1770 netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
1771 bnxt_sched_reset(bp, rxr);
1772 }
1773 goto next_rx_no_len;
1774 }
1775
1776 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1777 dma_addr = rx_buf->mapping;
1778
1779 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1780 rc = 1;
1781 goto next_rx;
1782 }
1783
1784 if (len <= bp->rx_copy_thresh) {
1785 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1786 bnxt_reuse_rx_data(rxr, cons, data);
1787 if (!skb) {
1788 if (agg_bufs)
1789 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1790 agg_bufs, false);
1791 rc = -ENOMEM;
1792 goto next_rx;
1793 }
1794 } else {
1795 u32 payload;
1796
1797 if (rx_buf->data_ptr == data_ptr)
1798 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1799 else
1800 payload = 0;
1801 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1802 payload | len);
1803 if (!skb) {
1804 rc = -ENOMEM;
1805 goto next_rx;
1806 }
1807 }
1808
1809 if (agg_bufs) {
1810 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1811 if (!skb) {
1812 rc = -ENOMEM;
1813 goto next_rx;
1814 }
1815 }
1816
1817 if (RX_CMP_HASH_VALID(rxcmp)) {
1818 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1819 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1820
1821 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1822 if (hash_type != 1 && hash_type != 3)
1823 type = PKT_HASH_TYPE_L3;
1824 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1825 }
1826
1827 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1828 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1829
1830 if ((rxcmp1->rx_cmp_flags2 &
1831 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1832 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1833 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1834 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1835 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1836
1837 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1838 }
1839
1840 skb_checksum_none_assert(skb);
1841 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1842 if (dev->features & NETIF_F_RXCSUM) {
1843 skb->ip_summed = CHECKSUM_UNNECESSARY;
1844 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1845 }
1846 } else {
1847 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1848 if (dev->features & NETIF_F_RXCSUM)
1849 bnapi->cp_ring.rx_l4_csum_errors++;
1850 }
1851 }
1852
1853 bnxt_deliver_skb(bp, bnapi, skb);
1854 rc = 1;
1855
1856 next_rx:
1857 cpr->rx_packets += 1;
1858 cpr->rx_bytes += len;
1859
1860 next_rx_no_len:
1861 rxr->rx_prod = NEXT_RX(prod);
1862 rxr->rx_next_cons = NEXT_RX(cons);
1863
1864 next_rx_no_prod_no_len:
1865 *raw_cons = tmp_raw_cons;
1866
1867 return rc;
1868 }
1869
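/* Note on bnxt_rx_pkt()'s return convention (relied on by the pollers
 * below): 1 means a packet was delivered up the stack, -EBUSY means the
 * completion is only partially written and must be retried, -ENOMEM
 * means an skb/buffer allocation failed, and -EIO means the packet was
 * dropped due to an L2 error.
 */
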
1870 /* In netpoll mode, if we are using a combined completion ring, we need to
1871 * discard the rx packets and recycle the buffers.
1872 */
1873 static int bnxt_force_rx_discard(struct bnxt *bp,
1874 struct bnxt_cp_ring_info *cpr,
1875 u32 *raw_cons, u8 *event)
1876 {
1877 u32 tmp_raw_cons = *raw_cons;
1878 struct rx_cmp_ext *rxcmp1;
1879 struct rx_cmp *rxcmp;
1880 u16 cp_cons;
1881 u8 cmp_type;
1882
1883 cp_cons = RING_CMP(tmp_raw_cons);
1884 rxcmp = (struct rx_cmp *)
1885 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1886
1887 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1888 cp_cons = RING_CMP(tmp_raw_cons);
1889 rxcmp1 = (struct rx_cmp_ext *)
1890 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1891
1892 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1893 return -EBUSY;
1894
1895 cmp_type = RX_CMP_TYPE(rxcmp);
1896 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1897 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1898 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1899 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1900 struct rx_tpa_end_cmp_ext *tpa_end1;
1901
1902 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1903 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1904 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1905 }
1906 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1907 }
1908
1909 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1910 {
1911 struct bnxt_fw_health *fw_health = bp->fw_health;
1912 u32 reg = fw_health->regs[reg_idx];
1913 u32 reg_type, reg_off, val = 0;
1914
1915 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
1916 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
1917 switch (reg_type) {
1918 case BNXT_FW_HEALTH_REG_TYPE_CFG:
1919 pci_read_config_dword(bp->pdev, reg_off, &val);
1920 break;
1921 case BNXT_FW_HEALTH_REG_TYPE_GRC:
1922 reg_off = fw_health->mapped_regs[reg_idx];
1923 /* fall through */
1924 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
1925 val = readl(bp->bar0 + reg_off);
1926 break;
1927 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
1928 val = readl(bp->bar1 + reg_off);
1929 break;
1930 }
1931 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
1932 val &= fw_health->fw_reset_inprog_reg_mask;
1933 return val;
1934 }
1935
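/* Each fw_health->regs[] entry packs a register-space selector and an
 * offset, decoded by the BNXT_FW_HEALTH_REG_TYPE()/_OFF() macros.  GRC
 * registers are first remapped into a BAR0 window (hence the fall
 * through into the BAR0 case).  Illustrative call, as used by the error
 * recovery handler below:
 *
 *	u32 health = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
 */
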
1936 #define BNXT_GET_EVENT_PORT(data) \
1937 ((data) & \
1938 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1939
1940 static int bnxt_async_event_process(struct bnxt *bp,
1941 struct hwrm_async_event_cmpl *cmpl)
1942 {
1943 u16 event_id = le16_to_cpu(cmpl->event_id);
1944
1945 /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1946 switch (event_id) {
1947 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1948 u32 data1 = le32_to_cpu(cmpl->event_data1);
1949 struct bnxt_link_info *link_info = &bp->link_info;
1950
1951 if (BNXT_VF(bp))
1952 goto async_event_process_exit;
1953
1954 /* print unsupported speed warning in forced speed mode only */
1955 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1956 (data1 & 0x20000)) {
1957 u16 fw_speed = link_info->force_link_speed;
1958 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1959
1960 if (speed != SPEED_UNKNOWN)
1961 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1962 speed);
1963 }
1964 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1965 }
1966 /* fall through */
1967 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1968 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1969 break;
1970 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1971 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1972 break;
1973 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1974 u32 data1 = le32_to_cpu(cmpl->event_data1);
1975 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1976
1977 if (BNXT_VF(bp))
1978 break;
1979
1980 if (bp->pf.port_id != port_id)
1981 break;
1982
1983 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1984 break;
1985 }
1986 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1987 if (BNXT_PF(bp))
1988 goto async_event_process_exit;
1989 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1990 break;
1991 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
1992 u32 data1 = le32_to_cpu(cmpl->event_data1);
1993
1994 bp->fw_reset_timestamp = jiffies;
1995 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
1996 if (!bp->fw_reset_min_dsecs)
1997 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
1998 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
1999 if (!bp->fw_reset_max_dsecs)
2000 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2001 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2002 netdev_warn(bp->dev, "Firmware fatal reset event received\n");
2003 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2004 } else {
2005 netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
2006 bp->fw_reset_max_dsecs * 100);
2007 }
2008 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2009 break;
2010 }
2011 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2012 struct bnxt_fw_health *fw_health = bp->fw_health;
2013 u32 data1 = le32_to_cpu(cmpl->event_data1);
2014
2015 if (!fw_health)
2016 goto async_event_process_exit;
2017
2018 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2019 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2020 if (!fw_health->enabled)
2021 break;
2022
2023 if (netif_msg_drv(bp))
2024 netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
2025 fw_health->enabled, fw_health->master,
2026 bnxt_fw_health_readl(bp,
2027 BNXT_FW_RESET_CNT_REG),
2028 bnxt_fw_health_readl(bp,
2029 BNXT_FW_HEALTH_REG));
2030 fw_health->tmr_multiplier =
2031 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2032 bp->current_interval * 10);
2033 fw_health->tmr_counter = fw_health->tmr_multiplier;
2034 fw_health->last_fw_heartbeat =
2035 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2036 fw_health->last_fw_reset_cnt =
2037 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2038 goto async_event_process_exit;
2039 }
2040 default:
2041 goto async_event_process_exit;
2042 }
2043 bnxt_queue_sp_work(bp);
2044 async_event_process_exit:
2045 bnxt_ulp_async_events(bp, cmpl);
2046 return 0;
2047 }
2048
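/* Async events are handled in two stages: the handler above runs in
 * completion-ring context and only records what happened, e.g.:
 *
 *	set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
 *	bnxt_queue_sp_work(bp);
 *
 * The heavy lifting then happens later in the slow-path workqueue,
 * keeping the IRQ/NAPI path short.
 */
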
2049 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2050 {
2051 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2052 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2053 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2054 (struct hwrm_fwd_req_cmpl *)txcmp;
2055
2056 switch (cmpl_type) {
2057 case CMPL_BASE_TYPE_HWRM_DONE:
2058 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2059 if (seq_id == bp->hwrm_intr_seq_id)
2060 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2061 else
2062 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2063 break;
2064
2065 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2066 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2067
2068 if ((vf_id < bp->pf.first_vf_id) ||
2069 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2070 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2071 vf_id);
2072 return -EINVAL;
2073 }
2074
2075 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2076 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2077 bnxt_queue_sp_work(bp);
2078 break;
2079
2080 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2081 bnxt_async_event_process(bp,
2082 (struct hwrm_async_event_cmpl *)txcmp);
2083 /* fall through */
2084 default:
2085 break;
2086 }
2087
2088 return 0;
2089 }
2090
2091 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2092 {
2093 struct bnxt_napi *bnapi = dev_instance;
2094 struct bnxt *bp = bnapi->bp;
2095 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2096 u32 cons = RING_CMP(cpr->cp_raw_cons);
2097
2098 cpr->event_ctr++;
2099 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2100 napi_schedule(&bnapi->napi);
2101 return IRQ_HANDLED;
2102 }
2103
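/* The MSI-X hard IRQ handler does no packet work itself; it only
 * prefetches the next completion entry and schedules NAPI.  All RX/TX
 * completion processing runs in bnxt_poll()/bnxt_poll_p5() in softirq
 * context.
 */
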
2104 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2105 {
2106 u32 raw_cons = cpr->cp_raw_cons;
2107 u16 cons = RING_CMP(raw_cons);
2108 struct tx_cmp *txcmp;
2109
2110 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2111
2112 return TX_CMP_VALID(txcmp, raw_cons);
2113 }
2114
2115 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2116 {
2117 struct bnxt_napi *bnapi = dev_instance;
2118 struct bnxt *bp = bnapi->bp;
2119 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2120 u32 cons = RING_CMP(cpr->cp_raw_cons);
2121 u32 int_status;
2122
2123 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2124
2125 if (!bnxt_has_work(bp, cpr)) {
2126 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2127 /* Return if this is a spurious interrupt (not ours) */
2128 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2129 return IRQ_NONE;
2130 }
2131
2132 /* disable ring IRQ */
2133 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2134
2135 /* Return here if interrupt is shared and is disabled. */
2136 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2137 return IRQ_HANDLED;
2138
2139 napi_schedule(&bnapi->napi);
2140 return IRQ_HANDLED;
2141 }
2142
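/* With legacy INTx the line may be shared, so bnxt_inta() first checks
 * the CAG status word for this ring's bit before claiming the
 * interrupt, and returns early without scheduling NAPI while
 * bp->intr_sem indicates interrupts are disabled (typically during
 * reconfiguration).
 */
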
2143 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2144 int budget)
2145 {
2146 struct bnxt_napi *bnapi = cpr->bnapi;
2147 u32 raw_cons = cpr->cp_raw_cons;
2148 u32 cons;
2149 int tx_pkts = 0;
2150 int rx_pkts = 0;
2151 u8 event = 0;
2152 struct tx_cmp *txcmp;
2153
2154 cpr->has_more_work = 0;
2155 while (1) {
2156 int rc;
2157
2158 cons = RING_CMP(raw_cons);
2159 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2160
2161 if (!TX_CMP_VALID(txcmp, raw_cons))
2162 break;
2163
2164 /* The validity test of the entry must be done before
2165 * reading any further.
2166 */
2167 dma_rmb();
2168 cpr->had_work_done = 1;
2169 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2170 tx_pkts++;
2171 /* return full budget so NAPI will complete. */
2172 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2173 rx_pkts = budget;
2174 raw_cons = NEXT_RAW_CMP(raw_cons);
2175 if (budget)
2176 cpr->has_more_work = 1;
2177 break;
2178 }
2179 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2180 if (likely(budget))
2181 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2182 else
2183 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2184 &event);
2185 if (likely(rc >= 0))
2186 rx_pkts += rc;
2187 /* Increment rx_pkts when rc is -ENOMEM to count towards
2188 * the NAPI budget. Otherwise, we may potentially loop
2189 * here forever if we consistently cannot allocate
2190 * buffers.
2191 */
2192 else if (rc == -ENOMEM && budget)
2193 rx_pkts++;
2194 else if (rc == -EBUSY) /* partial completion */
2195 break;
2196 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2197 CMPL_BASE_TYPE_HWRM_DONE) ||
2198 (TX_CMP_TYPE(txcmp) ==
2199 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2200 (TX_CMP_TYPE(txcmp) ==
2201 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2202 bnxt_hwrm_handler(bp, txcmp);
2203 }
2204 raw_cons = NEXT_RAW_CMP(raw_cons);
2205
2206 if (rx_pkts && rx_pkts == budget) {
2207 cpr->has_more_work = 1;
2208 break;
2209 }
2210 }
2211
2212 if (event & BNXT_REDIRECT_EVENT)
2213 xdp_do_flush_map();
2214
2215 if (event & BNXT_TX_EVENT) {
2216 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2217 u16 prod = txr->tx_prod;
2218
2219 /* Sync BD data before updating doorbell */
2220 wmb();
2221
2222 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2223 }
2224
2225 cpr->cp_raw_cons = raw_cons;
2226 bnapi->tx_pkts += tx_pkts;
2227 bnapi->events |= event;
2228 return rx_pkts;
2229 }
2230
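/* __bnxt_poll_work() drains mixed TX/RX completions until the NAPI
 * budget is spent.  Illustrative example: with budget = 64, a burst of
 * 70 RX completions stops the loop at rx_pkts == 64 with has_more_work
 * set, so the poller is re-invoked instead of re-arming the interrupt.
 * TX completions are not counted against the budget individually, but
 * crossing bp->tx_wake_thresh forces rx_pkts = budget to the same end.
 */
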
2231 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2232 {
2233 if (bnapi->tx_pkts) {
2234 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2235 bnapi->tx_pkts = 0;
2236 }
2237
2238 if (bnapi->events & BNXT_RX_EVENT) {
2239 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2240
2241 if (bnapi->events & BNXT_AGG_EVENT)
2242 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2243 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2244 }
2245 bnapi->events = 0;
2246 }
2247
2248 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2249 int budget)
2250 {
2251 struct bnxt_napi *bnapi = cpr->bnapi;
2252 int rx_pkts;
2253
2254 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2255
2256 /* ACK completion ring before freeing tx ring and producing new
2257 * buffers in rx/agg rings to prevent overflowing the completion
2258 * ring.
2259 */
2260 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2261
2262 __bnxt_poll_work_done(bp, bnapi);
2263 return rx_pkts;
2264 }
2265
2266 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2267 {
2268 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2269 struct bnxt *bp = bnapi->bp;
2270 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2271 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2272 struct tx_cmp *txcmp;
2273 struct rx_cmp_ext *rxcmp1;
2274 u32 cp_cons, tmp_raw_cons;
2275 u32 raw_cons = cpr->cp_raw_cons;
2276 u32 rx_pkts = 0;
2277 u8 event = 0;
2278
2279 while (1) {
2280 int rc;
2281
2282 cp_cons = RING_CMP(raw_cons);
2283 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2284
2285 if (!TX_CMP_VALID(txcmp, raw_cons))
2286 break;
2287
2288 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2289 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2290 cp_cons = RING_CMP(tmp_raw_cons);
2291 rxcmp1 = (struct rx_cmp_ext *)
2292 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2293
2294 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2295 break;
2296
2297 /* force an error to recycle the buffer */
2298 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2299 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2300
2301 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2302 if (likely(rc == -EIO) && budget)
2303 rx_pkts++;
2304 else if (rc == -EBUSY) /* partial completion */
2305 break;
2306 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2307 CMPL_BASE_TYPE_HWRM_DONE)) {
2308 bnxt_hwrm_handler(bp, txcmp);
2309 } else {
2310 netdev_err(bp->dev,
2311 "Invalid completion received on special ring\n");
2312 }
2313 raw_cons = NEXT_RAW_CMP(raw_cons);
2314
2315 if (rx_pkts == budget)
2316 break;
2317 }
2318
2319 cpr->cp_raw_cons = raw_cons;
2320 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2321 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2322
2323 if (event & BNXT_AGG_EVENT)
2324 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2325
2326 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2327 napi_complete_done(napi, rx_pkts);
2328 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2329 }
2330 return rx_pkts;
2331 }
2332
2333 static int bnxt_poll(struct napi_struct *napi, int budget)
2334 {
2335 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2336 struct bnxt *bp = bnapi->bp;
2337 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2338 int work_done = 0;
2339
2340 while (1) {
2341 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2342
2343 if (work_done >= budget) {
2344 if (!budget)
2345 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2346 break;
2347 }
2348
2349 if (!bnxt_has_work(bp, cpr)) {
2350 if (napi_complete_done(napi, work_done))
2351 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2352 break;
2353 }
2354 }
2355 if (bp->flags & BNXT_FLAG_DIM) {
2356 struct dim_sample dim_sample = {};
2357
2358 dim_update_sample(cpr->event_ctr,
2359 cpr->rx_packets,
2360 cpr->rx_bytes,
2361 &dim_sample);
2362 net_dim(&cpr->dim, dim_sample);
2363 }
2364 return work_done;
2365 }
2366
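/* When BNXT_FLAG_DIM is set, each poll feeds the event/packet/byte
 * counters to the net_dim library, which adaptively retunes interrupt
 * coalescing to the observed load.
 */
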
2367 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2368 {
2369 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2370 int i, work_done = 0;
2371
2372 for (i = 0; i < 2; i++) {
2373 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2374
2375 if (cpr2) {
2376 work_done += __bnxt_poll_work(bp, cpr2,
2377 budget - work_done);
2378 cpr->has_more_work |= cpr2->has_more_work;
2379 }
2380 }
2381 return work_done;
2382 }
2383
2384 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2385 u64 dbr_type, bool all)
2386 {
2387 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2388 int i;
2389
2390 for (i = 0; i < 2; i++) {
2391 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2392 struct bnxt_db_info *db;
2393
2394 if (cpr2 && (all || cpr2->had_work_done)) {
2395 db = &cpr2->cp_db;
2396 writeq(db->db_key64 | dbr_type |
2397 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2398 cpr2->had_work_done = 0;
2399 }
2400 }
2401 __bnxt_poll_work_done(bp, bnapi);
2402 }
2403
2404 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2405 {
2406 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2407 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2408 u32 raw_cons = cpr->cp_raw_cons;
2409 struct bnxt *bp = bnapi->bp;
2410 struct nqe_cn *nqcmp;
2411 int work_done = 0;
2412 u32 cons;
2413
2414 if (cpr->has_more_work) {
2415 cpr->has_more_work = 0;
2416 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2417 if (cpr->has_more_work) {
2418 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2419 return work_done;
2420 }
2421 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2422 if (napi_complete_done(napi, work_done))
2423 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2424 return work_done;
2425 }
2426 while (1) {
2427 cons = RING_CMP(raw_cons);
2428 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2429
2430 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2431 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2432 false);
2433 cpr->cp_raw_cons = raw_cons;
2434 if (napi_complete_done(napi, work_done))
2435 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2436 cpr->cp_raw_cons);
2437 return work_done;
2438 }
2439
2440 /* The validity test of the entry must be done before
2441 * reading any further.
2442 */
2443 dma_rmb();
2444
2445 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2446 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2447 struct bnxt_cp_ring_info *cpr2;
2448
2449 cpr2 = cpr->cp_ring_arr[idx];
2450 work_done += __bnxt_poll_work(bp, cpr2,
2451 budget - work_done);
2452 cpr->has_more_work = cpr2->has_more_work;
2453 } else {
2454 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2455 }
2456 raw_cons = NEXT_RAW_CMP(raw_cons);
2457 if (cpr->has_more_work)
2458 break;
2459 }
2460 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2461 cpr->cp_raw_cons = raw_cons;
2462 return work_done;
2463 }
2464
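/* On P5 chips the MSI-X vector services a notification queue (NQ)
 * rather than a completion ring directly.  Each NQ entry carries a
 * cq_handle that indexes cp_ring_arr[] (BNXT_RX_HDL/BNXT_TX_HDL), and
 * doorbells are written at two levels: DBR_TYPE_CQ/DBR_TYPE_CQ_ARMALL
 * for the child rings and BNXT_DB_NQ_ARM_P5() for the NQ itself.
 */
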
2465 static void bnxt_free_tx_skbs(struct bnxt *bp)
2466 {
2467 int i, max_idx;
2468 struct pci_dev *pdev = bp->pdev;
2469
2470 if (!bp->tx_ring)
2471 return;
2472
2473 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2474 for (i = 0; i < bp->tx_nr_rings; i++) {
2475 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2476 int j;
2477
2478 for (j = 0; j < max_idx;) {
2479 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2480 struct sk_buff *skb;
2481 int k, last;
2482
2483 if (i < bp->tx_nr_rings_xdp &&
2484 tx_buf->action == XDP_REDIRECT) {
2485 dma_unmap_single(&pdev->dev,
2486 dma_unmap_addr(tx_buf, mapping),
2487 dma_unmap_len(tx_buf, len),
2488 PCI_DMA_TODEVICE);
2489 xdp_return_frame(tx_buf->xdpf);
2490 tx_buf->action = 0;
2491 tx_buf->xdpf = NULL;
2492 j++;
2493 continue;
2494 }
2495
2496 skb = tx_buf->skb;
2497 if (!skb) {
2498 j++;
2499 continue;
2500 }
2501
2502 tx_buf->skb = NULL;
2503
2504 if (tx_buf->is_push) {
2505 dev_kfree_skb(skb);
2506 j += 2;
2507 continue;
2508 }
2509
2510 dma_unmap_single(&pdev->dev,
2511 dma_unmap_addr(tx_buf, mapping),
2512 skb_headlen(skb),
2513 PCI_DMA_TODEVICE);
2514
2515 last = tx_buf->nr_frags;
2516 j += 2;
2517 for (k = 0; k < last; k++, j++) {
2518 int ring_idx = j & bp->tx_ring_mask;
2519 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2520
2521 tx_buf = &txr->tx_buf_ring[ring_idx];
2522 dma_unmap_page(
2523 &pdev->dev,
2524 dma_unmap_addr(tx_buf, mapping),
2525 skb_frag_size(frag), PCI_DMA_TODEVICE);
2526 }
2527 dev_kfree_skb(skb);
2528 }
2529 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2530 }
2531 }
2532
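/* The head of each normal TX packet occupies two BD slots (the long BD
 * pair), which is why the walk above advances j += 2 before unmapping
 * the nr_frags fragment slots.  Push packets were copied into the
 * doorbell buffer, so only the skb itself needs freeing.
 */
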
2533 static void bnxt_free_rx_skbs(struct bnxt *bp)
2534 {
2535 int i, max_idx, max_agg_idx;
2536 struct pci_dev *pdev = bp->pdev;
2537
2538 if (!bp->rx_ring)
2539 return;
2540
2541 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2542 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2543 for (i = 0; i < bp->rx_nr_rings; i++) {
2544 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2545 struct bnxt_tpa_idx_map *map;
2546 int j;
2547
2548 if (rxr->rx_tpa) {
2549 for (j = 0; j < bp->max_tpa; j++) {
2550 struct bnxt_tpa_info *tpa_info =
2551 &rxr->rx_tpa[j];
2552 u8 *data = tpa_info->data;
2553
2554 if (!data)
2555 continue;
2556
2557 dma_unmap_single_attrs(&pdev->dev,
2558 tpa_info->mapping,
2559 bp->rx_buf_use_size,
2560 bp->rx_dir,
2561 DMA_ATTR_WEAK_ORDERING);
2562
2563 tpa_info->data = NULL;
2564
2565 kfree(data);
2566 }
2567 }
2568
2569 for (j = 0; j < max_idx; j++) {
2570 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2571 dma_addr_t mapping = rx_buf->mapping;
2572 void *data = rx_buf->data;
2573
2574 if (!data)
2575 continue;
2576
2577 rx_buf->data = NULL;
2578
2579 if (BNXT_RX_PAGE_MODE(bp)) {
2580 mapping -= bp->rx_dma_offset;
2581 dma_unmap_page_attrs(&pdev->dev, mapping,
2582 PAGE_SIZE, bp->rx_dir,
2583 DMA_ATTR_WEAK_ORDERING);
2584 page_pool_recycle_direct(rxr->page_pool, data);
2585 } else {
2586 dma_unmap_single_attrs(&pdev->dev, mapping,
2587 bp->rx_buf_use_size,
2588 bp->rx_dir,
2589 DMA_ATTR_WEAK_ORDERING);
2590 kfree(data);
2591 }
2592 }
2593
2594 for (j = 0; j < max_agg_idx; j++) {
2595 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2596 &rxr->rx_agg_ring[j];
2597 struct page *page = rx_agg_buf->page;
2598
2599 if (!page)
2600 continue;
2601
2602 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2603 BNXT_RX_PAGE_SIZE,
2604 PCI_DMA_FROMDEVICE,
2605 DMA_ATTR_WEAK_ORDERING);
2606
2607 rx_agg_buf->page = NULL;
2608 __clear_bit(j, rxr->rx_agg_bmap);
2609
2610 __free_page(page);
2611 }
2612 if (rxr->rx_page) {
2613 __free_page(rxr->rx_page);
2614 rxr->rx_page = NULL;
2615 }
2616 map = rxr->rx_tpa_idx_map;
2617 if (map)
2618 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2619 }
2620 }
2621
2622 static void bnxt_free_skbs(struct bnxt *bp)
2623 {
2624 bnxt_free_tx_skbs(bp);
2625 bnxt_free_rx_skbs(bp);
2626 }
2627
2628 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2629 {
2630 struct pci_dev *pdev = bp->pdev;
2631 int i;
2632
2633 for (i = 0; i < rmem->nr_pages; i++) {
2634 if (!rmem->pg_arr[i])
2635 continue;
2636
2637 dma_free_coherent(&pdev->dev, rmem->page_size,
2638 rmem->pg_arr[i], rmem->dma_arr[i]);
2639
2640 rmem->pg_arr[i] = NULL;
2641 }
2642 if (rmem->pg_tbl) {
2643 size_t pg_tbl_size = rmem->nr_pages * 8;
2644
2645 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2646 pg_tbl_size = rmem->page_size;
2647 dma_free_coherent(&pdev->dev, pg_tbl_size,
2648 rmem->pg_tbl, rmem->pg_tbl_map);
2649 rmem->pg_tbl = NULL;
2650 }
2651 if (rmem->vmem_size && *rmem->vmem) {
2652 vfree(*rmem->vmem);
2653 *rmem->vmem = NULL;
2654 }
2655 }
2656
2657 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2658 {
2659 struct pci_dev *pdev = bp->pdev;
2660 u64 valid_bit = 0;
2661 int i;
2662
2663 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2664 valid_bit = PTU_PTE_VALID;
2665 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2666 size_t pg_tbl_size = rmem->nr_pages * 8;
2667
2668 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2669 pg_tbl_size = rmem->page_size;
2670 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2671 &rmem->pg_tbl_map,
2672 GFP_KERNEL);
2673 if (!rmem->pg_tbl)
2674 return -ENOMEM;
2675 }
2676
2677 for (i = 0; i < rmem->nr_pages; i++) {
2678 u64 extra_bits = valid_bit;
2679
2680 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2681 rmem->page_size,
2682 &rmem->dma_arr[i],
2683 GFP_KERNEL);
2684 if (!rmem->pg_arr[i])
2685 return -ENOMEM;
2686
2687 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2688 if (i == rmem->nr_pages - 2 &&
2689 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2690 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2691 else if (i == rmem->nr_pages - 1 &&
2692 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2693 extra_bits |= PTU_PTE_LAST;
2694 rmem->pg_tbl[i] =
2695 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2696 }
2697 }
2698
2699 if (rmem->vmem_size) {
2700 *rmem->vmem = vzalloc(rmem->vmem_size);
2701 if (!(*rmem->vmem))
2702 return -ENOMEM;
2703 }
2704 return 0;
2705 }
2706
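/* For multi-page rings, bnxt_alloc_ring() also builds the page table
 * the hardware walks.  Each entry is the page's DMA address OR'ed with
 * PTE flag bits; illustrative layout for a 4-page ring allocated with
 * BNXT_RMEM_RING_PTE_FLAG:
 *
 *	pg_tbl[0] = dma_arr[0] | PTU_PTE_VALID
 *	pg_tbl[1] = dma_arr[1] | PTU_PTE_VALID
 *	pg_tbl[2] = dma_arr[2] | PTU_PTE_VALID | PTU_PTE_NEXT_TO_LAST
 *	pg_tbl[3] = dma_arr[3] | PTU_PTE_VALID | PTU_PTE_LAST
 */
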
2707 static void bnxt_free_tpa_info(struct bnxt *bp)
2708 {
2709 int i;
2710
2711 for (i = 0; i < bp->rx_nr_rings; i++) {
2712 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2713
2714 kfree(rxr->rx_tpa_idx_map);
2715 rxr->rx_tpa_idx_map = NULL;
2716 if (rxr->rx_tpa) {
2717 kfree(rxr->rx_tpa[0].agg_arr);
2718 rxr->rx_tpa[0].agg_arr = NULL;
2719 }
2720 kfree(rxr->rx_tpa);
2721 rxr->rx_tpa = NULL;
2722 }
2723 }
2724
2725 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2726 {
2727 int i, j, total_aggs = 0;
2728
2729 bp->max_tpa = MAX_TPA;
2730 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2731 if (!bp->max_tpa_v2)
2732 return 0;
2733 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2734 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2735 }
2736
2737 for (i = 0; i < bp->rx_nr_rings; i++) {
2738 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2739 struct rx_agg_cmp *agg;
2740
2741 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2742 GFP_KERNEL);
2743 if (!rxr->rx_tpa)
2744 return -ENOMEM;
2745
2746 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2747 continue;
2748 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2749 rxr->rx_tpa[0].agg_arr = agg;
2750 if (!agg)
2751 return -ENOMEM;
2752 for (j = 1; j < bp->max_tpa; j++)
2753 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2754 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2755 GFP_KERNEL);
2756 if (!rxr->rx_tpa_idx_map)
2757 return -ENOMEM;
2758 }
2759 return 0;
2760 }
2761
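/* On P5 chips one contiguous agg_arr block is carved up across all TPA
 * slots of a ring: slot j uses agg_arr + j * MAX_SKB_FRAGS.  Freeing
 * only rx_tpa[0].agg_arr in bnxt_free_tpa_info() therefore releases the
 * whole block.
 */
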
2762 static void bnxt_free_rx_rings(struct bnxt *bp)
2763 {
2764 int i;
2765
2766 if (!bp->rx_ring)
2767 return;
2768
2769 bnxt_free_tpa_info(bp);
2770 for (i = 0; i < bp->rx_nr_rings; i++) {
2771 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2772 struct bnxt_ring_struct *ring;
2773
2774 if (rxr->xdp_prog)
2775 bpf_prog_put(rxr->xdp_prog);
2776
2777 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2778 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2779
2780 page_pool_destroy(rxr->page_pool);
2781 rxr->page_pool = NULL;
2782
2783 kfree(rxr->rx_agg_bmap);
2784 rxr->rx_agg_bmap = NULL;
2785
2786 ring = &rxr->rx_ring_struct;
2787 bnxt_free_ring(bp, &ring->ring_mem);
2788
2789 ring = &rxr->rx_agg_ring_struct;
2790 bnxt_free_ring(bp, &ring->ring_mem);
2791 }
2792 }
2793
2794 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2795 struct bnxt_rx_ring_info *rxr)
2796 {
2797 struct page_pool_params pp = { 0 };
2798
2799 pp.pool_size = bp->rx_ring_size;
2800 pp.nid = dev_to_node(&bp->pdev->dev);
2801 pp.dev = &bp->pdev->dev;
2802 pp.dma_dir = DMA_BIDIRECTIONAL;
2803
2804 rxr->page_pool = page_pool_create(&pp);
2805 if (IS_ERR(rxr->page_pool)) {
2806 int err = PTR_ERR(rxr->page_pool);
2807
2808 rxr->page_pool = NULL;
2809 return err;
2810 }
2811 return 0;
2812 }
2813
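/* One page_pool instance is created per RX ring, sized to the ring and
 * bound to the device's NUMA node.  bnxt_alloc_rx_rings() below also
 * registers it as the xdp_rxq memory model so XDP can recycle pages
 * through the pool.
 */
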
2814 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2815 {
2816 int i, rc = 0, agg_rings = 0;
2817
2818 if (!bp->rx_ring)
2819 return -ENOMEM;
2820
2821 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2822 agg_rings = 1;
2823
2824 for (i = 0; i < bp->rx_nr_rings; i++) {
2825 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2826 struct bnxt_ring_struct *ring;
2827
2828 ring = &rxr->rx_ring_struct;
2829
2830 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2831 if (rc)
2832 return rc;
2833
2834 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2835 if (rc < 0)
2836 return rc;
2837
2838 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2839 MEM_TYPE_PAGE_POOL,
2840 rxr->page_pool);
2841 if (rc) {
2842 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2843 return rc;
2844 }
2845
2846 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2847 if (rc)
2848 return rc;
2849
2850 ring->grp_idx = i;
2851 if (agg_rings) {
2852 u16 mem_size;
2853
2854 ring = &rxr->rx_agg_ring_struct;
2855 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2856 if (rc)
2857 return rc;
2858
2859 ring->grp_idx = i;
2860 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2861 mem_size = rxr->rx_agg_bmap_size / 8;
2862 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2863 if (!rxr->rx_agg_bmap)
2864 return -ENOMEM;
2865 }
2866 }
2867 if (bp->flags & BNXT_FLAG_TPA)
2868 rc = bnxt_alloc_tpa_info(bp);
2869 return rc;
2870 }
2871
2872 static void bnxt_free_tx_rings(struct bnxt *bp)
2873 {
2874 int i;
2875 struct pci_dev *pdev = bp->pdev;
2876
2877 if (!bp->tx_ring)
2878 return;
2879
2880 for (i = 0; i < bp->tx_nr_rings; i++) {
2881 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2882 struct bnxt_ring_struct *ring;
2883
2884 if (txr->tx_push) {
2885 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2886 txr->tx_push, txr->tx_push_mapping);
2887 txr->tx_push = NULL;
2888 }
2889
2890 ring = &txr->tx_ring_struct;
2891
2892 bnxt_free_ring(bp, &ring->ring_mem);
2893 }
2894 }
2895
2896 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2897 {
2898 int i, j, rc;
2899 struct pci_dev *pdev = bp->pdev;
2900
2901 bp->tx_push_size = 0;
2902 if (bp->tx_push_thresh) {
2903 int push_size;
2904
2905 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2906 bp->tx_push_thresh);
2907
2908 if (push_size > 256) {
2909 push_size = 0;
2910 bp->tx_push_thresh = 0;
2911 }
2912
2913 bp->tx_push_size = push_size;
2914 }
2915
2916 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2917 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2918 struct bnxt_ring_struct *ring;
2919 u8 qidx;
2920
2921 ring = &txr->tx_ring_struct;
2922
2923 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2924 if (rc)
2925 return rc;
2926
2927 ring->grp_idx = txr->bnapi->index;
2928 if (bp->tx_push_size) {
2929 dma_addr_t mapping;
2930
2931 /* One pre-allocated DMA buffer to back up the
2932 * TX push operation
2933 */
2934 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2935 bp->tx_push_size,
2936 &txr->tx_push_mapping,
2937 GFP_KERNEL);
2938
2939 if (!txr->tx_push)
2940 return -ENOMEM;
2941
2942 mapping = txr->tx_push_mapping +
2943 sizeof(struct tx_push_bd);
2944 txr->data_mapping = cpu_to_le64(mapping);
2945 }
2946 qidx = bp->tc_to_qidx[j];
2947 ring->queue_id = bp->q_info[qidx].queue_id;
2948 if (i < bp->tx_nr_rings_xdp)
2949 continue;
2950 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2951 j++;
2952 }
2953 return 0;
2954 }
2955
2956 static void bnxt_free_cp_rings(struct bnxt *bp)
2957 {
2958 int i;
2959
2960 if (!bp->bnapi)
2961 return;
2962
2963 for (i = 0; i < bp->cp_nr_rings; i++) {
2964 struct bnxt_napi *bnapi = bp->bnapi[i];
2965 struct bnxt_cp_ring_info *cpr;
2966 struct bnxt_ring_struct *ring;
2967 int j;
2968
2969 if (!bnapi)
2970 continue;
2971
2972 cpr = &bnapi->cp_ring;
2973 ring = &cpr->cp_ring_struct;
2974
2975 bnxt_free_ring(bp, &ring->ring_mem);
2976
2977 for (j = 0; j < 2; j++) {
2978 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2979
2980 if (cpr2) {
2981 ring = &cpr2->cp_ring_struct;
2982 bnxt_free_ring(bp, &ring->ring_mem);
2983 kfree(cpr2);
2984 cpr->cp_ring_arr[j] = NULL;
2985 }
2986 }
2987 }
2988 }
2989
2990 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2991 {
2992 struct bnxt_ring_mem_info *rmem;
2993 struct bnxt_ring_struct *ring;
2994 struct bnxt_cp_ring_info *cpr;
2995 int rc;
2996
2997 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
2998 if (!cpr)
2999 return NULL;
3000
3001 ring = &cpr->cp_ring_struct;
3002 rmem = &ring->ring_mem;
3003 rmem->nr_pages = bp->cp_nr_pages;
3004 rmem->page_size = HW_CMPD_RING_SIZE;
3005 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3006 rmem->dma_arr = cpr->cp_desc_mapping;
3007 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3008 rc = bnxt_alloc_ring(bp, rmem);
3009 if (rc) {
3010 bnxt_free_ring(bp, rmem);
3011 kfree(cpr);
3012 cpr = NULL;
3013 }
3014 return cpr;
3015 }
3016
3017 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3018 {
3019 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3020 int i, rc, ulp_base_vec, ulp_msix;
3021
3022 ulp_msix = bnxt_get_ulp_msix_num(bp);
3023 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3024 for (i = 0; i < bp->cp_nr_rings; i++) {
3025 struct bnxt_napi *bnapi = bp->bnapi[i];
3026 struct bnxt_cp_ring_info *cpr;
3027 struct bnxt_ring_struct *ring;
3028
3029 if (!bnapi)
3030 continue;
3031
3032 cpr = &bnapi->cp_ring;
3033 cpr->bnapi = bnapi;
3034 ring = &cpr->cp_ring_struct;
3035
3036 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3037 if (rc)
3038 return rc;
3039
3040 if (ulp_msix && i >= ulp_base_vec)
3041 ring->map_idx = i + ulp_msix;
3042 else
3043 ring->map_idx = i;
3044
3045 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3046 continue;
3047
3048 if (i < bp->rx_nr_rings) {
3049 struct bnxt_cp_ring_info *cpr2 =
3050 bnxt_alloc_cp_sub_ring(bp);
3051
3052 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3053 if (!cpr2)
3054 return -ENOMEM;
3055 cpr2->bnapi = bnapi;
3056 }
3057 if ((sh && i < bp->tx_nr_rings) ||
3058 (!sh && i >= bp->rx_nr_rings)) {
3059 struct bnxt_cp_ring_info *cpr2 =
3060 bnxt_alloc_cp_sub_ring(bp);
3061
3062 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3063 if (!cpr2)
3064 return -ENOMEM;
3065 cpr2->bnapi = bnapi;
3066 }
3067 }
3068 return 0;
3069 }
3070
3071 static void bnxt_init_ring_struct(struct bnxt *bp)
3072 {
3073 int i;
3074
3075 for (i = 0; i < bp->cp_nr_rings; i++) {
3076 struct bnxt_napi *bnapi = bp->bnapi[i];
3077 struct bnxt_ring_mem_info *rmem;
3078 struct bnxt_cp_ring_info *cpr;
3079 struct bnxt_rx_ring_info *rxr;
3080 struct bnxt_tx_ring_info *txr;
3081 struct bnxt_ring_struct *ring;
3082
3083 if (!bnapi)
3084 continue;
3085
3086 cpr = &bnapi->cp_ring;
3087 ring = &cpr->cp_ring_struct;
3088 rmem = &ring->ring_mem;
3089 rmem->nr_pages = bp->cp_nr_pages;
3090 rmem->page_size = HW_CMPD_RING_SIZE;
3091 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3092 rmem->dma_arr = cpr->cp_desc_mapping;
3093 rmem->vmem_size = 0;
3094
3095 rxr = bnapi->rx_ring;
3096 if (!rxr)
3097 goto skip_rx;
3098
3099 ring = &rxr->rx_ring_struct;
3100 rmem = &ring->ring_mem;
3101 rmem->nr_pages = bp->rx_nr_pages;
3102 rmem->page_size = HW_RXBD_RING_SIZE;
3103 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3104 rmem->dma_arr = rxr->rx_desc_mapping;
3105 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3106 rmem->vmem = (void **)&rxr->rx_buf_ring;
3107
3108 ring = &rxr->rx_agg_ring_struct;
3109 rmem = &ring->ring_mem;
3110 rmem->nr_pages = bp->rx_agg_nr_pages;
3111 rmem->page_size = HW_RXBD_RING_SIZE;
3112 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3113 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3114 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3115 rmem->vmem = (void **)&rxr->rx_agg_ring;
3116
3117 skip_rx:
3118 txr = bnapi->tx_ring;
3119 if (!txr)
3120 continue;
3121
3122 ring = &txr->tx_ring_struct;
3123 rmem = &ring->ring_mem;
3124 rmem->nr_pages = bp->tx_nr_pages;
3125 rmem->page_size = HW_RXBD_RING_SIZE;
3126 rmem->pg_arr = (void **)txr->tx_desc_ring;
3127 rmem->dma_arr = txr->tx_desc_mapping;
3128 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3129 rmem->vmem = (void **)&txr->tx_buf_ring;
3130 }
3131 }
3132
3133 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3134 {
3135 int i;
3136 u32 prod;
3137 struct rx_bd **rx_buf_ring;
3138
3139 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3140 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3141 int j;
3142 struct rx_bd *rxbd;
3143
3144 rxbd = rx_buf_ring[i];
3145 if (!rxbd)
3146 continue;
3147
3148 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3149 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3150 rxbd->rx_bd_opaque = prod;
3151 }
3152 }
3153 }
3154
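/* rx_bd_opaque is seeded with the descriptor's own producer index; the
 * hardware echoes it back in rx_cmp_opaque, which is how bnxt_rx_pkt()
 * detects out-of-order completions ("RX cons %x != expected cons %x").
 */
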
3155 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3156 {
3157 struct net_device *dev = bp->dev;
3158 struct bnxt_rx_ring_info *rxr;
3159 struct bnxt_ring_struct *ring;
3160 u32 prod, type;
3161 int i;
3162
3163 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3164 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3165
3166 if (NET_IP_ALIGN == 2)
3167 type |= RX_BD_FLAGS_SOP;
3168
3169 rxr = &bp->rx_ring[ring_nr];
3170 ring = &rxr->rx_ring_struct;
3171 bnxt_init_rxbd_pages(ring, type);
3172
3173 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3174 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
3175 if (IS_ERR(rxr->xdp_prog)) {
3176 int rc = PTR_ERR(rxr->xdp_prog);
3177
3178 rxr->xdp_prog = NULL;
3179 return rc;
3180 }
3181 }
3182 prod = rxr->rx_prod;
3183 for (i = 0; i < bp->rx_ring_size; i++) {
3184 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
3185 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3186 ring_nr, i, bp->rx_ring_size);
3187 break;
3188 }
3189 prod = NEXT_RX(prod);
3190 }
3191 rxr->rx_prod = prod;
3192 ring->fw_ring_id = INVALID_HW_RING_ID;
3193
3194 ring = &rxr->rx_agg_ring_struct;
3195 ring->fw_ring_id = INVALID_HW_RING_ID;
3196
3197 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3198 return 0;
3199
3200 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3201 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3202
3203 bnxt_init_rxbd_pages(ring, type);
3204
3205 prod = rxr->rx_agg_prod;
3206 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3207 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
3208 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3209 ring_nr, i, bp->rx_agg_ring_size);
3210 break;
3211 }
3212 prod = NEXT_RX_AGG(prod);
3213 }
3214 rxr->rx_agg_prod = prod;
3215
3216 if (bp->flags & BNXT_FLAG_TPA) {
3217 if (rxr->rx_tpa) {
3218 u8 *data;
3219 dma_addr_t mapping;
3220
3221 for (i = 0; i < bp->max_tpa; i++) {
3222 data = __bnxt_alloc_rx_data(bp, &mapping,
3223 GFP_KERNEL);
3224 if (!data)
3225 return -ENOMEM;
3226
3227 rxr->rx_tpa[i].data = data;
3228 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3229 rxr->rx_tpa[i].mapping = mapping;
3230 }
3231 } else {
3232 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
3233 return -ENOMEM;
3234 }
3235 }
3236
3237 return 0;
3238 }
3239
3240 static void bnxt_init_cp_rings(struct bnxt *bp)
3241 {
3242 int i, j;
3243
3244 for (i = 0; i < bp->cp_nr_rings; i++) {
3245 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3246 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3247
3248 ring->fw_ring_id = INVALID_HW_RING_ID;
3249 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3250 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3251 for (j = 0; j < 2; j++) {
3252 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3253
3254 if (!cpr2)
3255 continue;
3256
3257 ring = &cpr2->cp_ring_struct;
3258 ring->fw_ring_id = INVALID_HW_RING_ID;
3259 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3260 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3261 }
3262 }
3263 }
3264
3265 static int bnxt_init_rx_rings(struct bnxt *bp)
3266 {
3267 int i, rc = 0;
3268
3269 if (BNXT_RX_PAGE_MODE(bp)) {
3270 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3271 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3272 } else {
3273 bp->rx_offset = BNXT_RX_OFFSET;
3274 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3275 }
3276
3277 for (i = 0; i < bp->rx_nr_rings; i++) {
3278 rc = bnxt_init_one_rx_ring(bp, i);
3279 if (rc)
3280 break;
3281 }
3282
3283 return rc;
3284 }
3285
3286 static int bnxt_init_tx_rings(struct bnxt *bp)
3287 {
3288 u16 i;
3289
3290 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3291 MAX_SKB_FRAGS + 1);
3292
3293 for (i = 0; i < bp->tx_nr_rings; i++) {
3294 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3295 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3296
3297 ring->fw_ring_id = INVALID_HW_RING_ID;
3298 }
3299
3300 return 0;
3301 }
3302
3303 static void bnxt_free_ring_grps(struct bnxt *bp)
3304 {
3305 kfree(bp->grp_info);
3306 bp->grp_info = NULL;
3307 }
3308
3309 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3310 {
3311 int i;
3312
3313 if (irq_re_init) {
3314 bp->grp_info = kcalloc(bp->cp_nr_rings,
3315 sizeof(struct bnxt_ring_grp_info),
3316 GFP_KERNEL);
3317 if (!bp->grp_info)
3318 return -ENOMEM;
3319 }
3320 for (i = 0; i < bp->cp_nr_rings; i++) {
3321 if (irq_re_init)
3322 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3323 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3324 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3325 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3326 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3327 }
3328 return 0;
3329 }
3330
3331 static void bnxt_free_vnics(struct bnxt *bp)
3332 {
3333 kfree(bp->vnic_info);
3334 bp->vnic_info = NULL;
3335 bp->nr_vnics = 0;
3336 }
3337
3338 static int bnxt_alloc_vnics(struct bnxt *bp)
3339 {
3340 int num_vnics = 1;
3341
3342 #ifdef CONFIG_RFS_ACCEL
3343 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3344 num_vnics += bp->rx_nr_rings;
3345 #endif
3346
3347 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3348 num_vnics++;
3349
3350 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3351 GFP_KERNEL);
3352 if (!bp->vnic_info)
3353 return -ENOMEM;
3354
3355 bp->nr_vnics = num_vnics;
3356 return 0;
3357 }
3358
3359 static void bnxt_init_vnics(struct bnxt *bp)
3360 {
3361 int i;
3362
3363 for (i = 0; i < bp->nr_vnics; i++) {
3364 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3365 int j;
3366
3367 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3368 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3369 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3370
3371 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3372
3373 if (bp->vnic_info[i].rss_hash_key) {
3374 if (i == 0)
3375 prandom_bytes(vnic->rss_hash_key,
3376 HW_HASH_KEY_SIZE);
3377 else
3378 memcpy(vnic->rss_hash_key,
3379 bp->vnic_info[0].rss_hash_key,
3380 HW_HASH_KEY_SIZE);
3381 }
3382 }
3383 }
3384
3385 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3386 {
3387 int pages;
3388
3389 pages = ring_size / desc_per_pg;
3390
3391 if (!pages)
3392 return 1;
3393
3394 pages++;
3395
3396 while (pages & (pages - 1))
3397 pages++;
3398
3399 return pages;
3400 }
3401
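/* The page count is rounded up to the next power of two so that the
 * ring masks computed in bnxt_set_ring_params() work.  Illustrative
 * values, assuming desc_per_pg = 256:
 *
 *	ring_size =  200 -> pages = 0 -> returns 1
 *	ring_size =  800 -> pages = 3 -> 4 (next power of two)
 *	ring_size = 1024 -> pages = 4 -> 5 -> 8
 *
 * Note that an exact multiple still gains a page because of the
 * unconditional pages++.
 */
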
3402 void bnxt_set_tpa_flags(struct bnxt *bp)
3403 {
3404 bp->flags &= ~BNXT_FLAG_TPA;
3405 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3406 return;
3407 if (bp->dev->features & NETIF_F_LRO)
3408 bp->flags |= BNXT_FLAG_LRO;
3409 else if (bp->dev->features & NETIF_F_GRO_HW)
3410 bp->flags |= BNXT_FLAG_GRO;
3411 }
3412
3413 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3414 * be set on entry.
3415 */
3416 void bnxt_set_ring_params(struct bnxt *bp)
3417 {
3418 u32 ring_size, rx_size, rx_space;
3419 u32 agg_factor = 0, agg_ring_size = 0;
3420
3421 /* 8 for CRC and VLAN */
3422 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3423
3424 rx_space = rx_size + NET_SKB_PAD +
3425 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3426
3427 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3428 ring_size = bp->rx_ring_size;
3429 bp->rx_agg_ring_size = 0;
3430 bp->rx_agg_nr_pages = 0;
3431
3432 if (bp->flags & BNXT_FLAG_TPA)
3433 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3434
3435 bp->flags &= ~BNXT_FLAG_JUMBO;
3436 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3437 u32 jumbo_factor;
3438
3439 bp->flags |= BNXT_FLAG_JUMBO;
3440 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3441 if (jumbo_factor > agg_factor)
3442 agg_factor = jumbo_factor;
3443 }
3444 agg_ring_size = ring_size * agg_factor;
3445
3446 if (agg_ring_size) {
3447 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3448 RX_DESC_CNT);
3449 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3450 u32 tmp = agg_ring_size;
3451
3452 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3453 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3454 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3455 tmp, agg_ring_size);
3456 }
3457 bp->rx_agg_ring_size = agg_ring_size;
3458 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3459 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3460 rx_space = rx_size + NET_SKB_PAD +
3461 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3462 }
3463
3464 bp->rx_buf_use_size = rx_size;
3465 bp->rx_buf_size = rx_space;
3466
3467 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3468 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3469
3470 ring_size = bp->tx_ring_size;
3471 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3472 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3473
3474 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3475 bp->cp_ring_size = ring_size;
3476
3477 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3478 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3479 bp->cp_nr_pages = MAX_CP_PAGES;
3480 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3481 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3482 ring_size, bp->cp_ring_size);
3483 }
3484 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3485 bp->cp_ring_mask = bp->cp_bit - 1;
3486 }
3487
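/* The completion ring must absorb the worst case of every RX, agg and
 * TX descriptor completing at once, hence rx * (2 + agg_factor) + tx
 * above.  Illustrative sizing: rx = 512, tx = 512, agg_factor = 4
 * gives 512 * 6 + 512 = 3584 completion entries before page rounding.
 */
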
3488 /* Changing allocation mode of RX rings.
3489 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3490 */
3491 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3492 {
3493 if (page_mode) {
3494 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3495 return -EOPNOTSUPP;
3496 bp->dev->max_mtu =
3497 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3498 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3499 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3500 bp->rx_dir = DMA_BIDIRECTIONAL;
3501 bp->rx_skb_func = bnxt_rx_page_skb;
3502 /* Disable LRO or GRO_HW */
3503 netdev_update_features(bp->dev);
3504 } else {
3505 bp->dev->max_mtu = bp->max_mtu;
3506 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3507 bp->rx_dir = DMA_FROM_DEVICE;
3508 bp->rx_skb_func = bnxt_rx_skb;
3509 }
3510 return 0;
3511 }
3512
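/* XDP forces page mode: buffers become full pages mapped
 * DMA_BIDIRECTIONAL (so a program may rewrite the frame), aggregation
 * rings are disabled, and the MTU is capped at BNXT_MAX_PAGE_MODE_MTU,
 * which is why LRO/GRO_HW are switched off via
 * netdev_update_features().
 */
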
3513 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3514 {
3515 int i;
3516 struct bnxt_vnic_info *vnic;
3517 struct pci_dev *pdev = bp->pdev;
3518
3519 if (!bp->vnic_info)
3520 return;
3521
3522 for (i = 0; i < bp->nr_vnics; i++) {
3523 vnic = &bp->vnic_info[i];
3524
3525 kfree(vnic->fw_grp_ids);
3526 vnic->fw_grp_ids = NULL;
3527
3528 kfree(vnic->uc_list);
3529 vnic->uc_list = NULL;
3530
3531 if (vnic->mc_list) {
3532 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3533 vnic->mc_list, vnic->mc_list_mapping);
3534 vnic->mc_list = NULL;
3535 }
3536
3537 if (vnic->rss_table) {
3538 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3539 vnic->rss_table,
3540 vnic->rss_table_dma_addr);
3541 vnic->rss_table = NULL;
3542 }
3543
3544 vnic->rss_hash_key = NULL;
3545 vnic->flags = 0;
3546 }
3547 }
3548
3549 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3550 {
3551 int i, rc = 0, size;
3552 struct bnxt_vnic_info *vnic;
3553 struct pci_dev *pdev = bp->pdev;
3554 int max_rings;
3555
3556 for (i = 0; i < bp->nr_vnics; i++) {
3557 vnic = &bp->vnic_info[i];
3558
3559 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3560 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3561
3562 if (mem_size > 0) {
3563 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3564 if (!vnic->uc_list) {
3565 rc = -ENOMEM;
3566 goto out;
3567 }
3568 }
3569 }
3570
3571 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3572 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3573 vnic->mc_list =
3574 dma_alloc_coherent(&pdev->dev,
3575 vnic->mc_list_size,
3576 &vnic->mc_list_mapping,
3577 GFP_KERNEL);
3578 if (!vnic->mc_list) {
3579 rc = -ENOMEM;
3580 goto out;
3581 }
3582 }
3583
3584 if (bp->flags & BNXT_FLAG_CHIP_P5)
3585 goto vnic_skip_grps;
3586
3587 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3588 max_rings = bp->rx_nr_rings;
3589 else
3590 max_rings = 1;
3591
3592 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3593 if (!vnic->fw_grp_ids) {
3594 rc = -ENOMEM;
3595 goto out;
3596 }
3597 vnic_skip_grps:
3598 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3599 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3600 continue;
3601
3602 /* Allocate rss table and hash key */
3603 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3604 &vnic->rss_table_dma_addr,
3605 GFP_KERNEL);
3606 if (!vnic->rss_table) {
3607 rc = -ENOMEM;
3608 goto out;
3609 }
3610
3611 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3612
3613 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3614 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3615 }
3616 return 0;
3617
3618 out:
3619 return rc;
3620 }
3621
3622 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3623 {
3624 struct pci_dev *pdev = bp->pdev;
3625
3626 if (bp->hwrm_cmd_resp_addr) {
3627 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3628 bp->hwrm_cmd_resp_dma_addr);
3629 bp->hwrm_cmd_resp_addr = NULL;
3630 }
3631
3632 if (bp->hwrm_cmd_kong_resp_addr) {
3633 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3634 bp->hwrm_cmd_kong_resp_addr,
3635 bp->hwrm_cmd_kong_resp_dma_addr);
3636 bp->hwrm_cmd_kong_resp_addr = NULL;
3637 }
3638 }
3639
3640 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3641 {
3642 struct pci_dev *pdev = bp->pdev;
3643
3644 if (bp->hwrm_cmd_kong_resp_addr)
3645 return 0;
3646
3647 bp->hwrm_cmd_kong_resp_addr =
3648 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3649 &bp->hwrm_cmd_kong_resp_dma_addr,
3650 GFP_KERNEL);
3651 if (!bp->hwrm_cmd_kong_resp_addr)
3652 return -ENOMEM;
3653
3654 return 0;
3655 }
3656
3657 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3658 {
3659 struct pci_dev *pdev = bp->pdev;
3660
3661 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3662 &bp->hwrm_cmd_resp_dma_addr,
3663 GFP_KERNEL);
3664 if (!bp->hwrm_cmd_resp_addr)
3665 return -ENOMEM;
3666
3667 return 0;
3668 }
3669
3670 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3671 {
3672 if (bp->hwrm_short_cmd_req_addr) {
3673 struct pci_dev *pdev = bp->pdev;
3674
3675 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3676 bp->hwrm_short_cmd_req_addr,
3677 bp->hwrm_short_cmd_req_dma_addr);
3678 bp->hwrm_short_cmd_req_addr = NULL;
3679 }
3680 }
3681
3682 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3683 {
3684 struct pci_dev *pdev = bp->pdev;
3685
3686 if (bp->hwrm_short_cmd_req_addr)
3687 return 0;
3688
3689 bp->hwrm_short_cmd_req_addr =
3690 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3691 &bp->hwrm_short_cmd_req_dma_addr,
3692 GFP_KERNEL);
3693 if (!bp->hwrm_short_cmd_req_addr)
3694 return -ENOMEM;
3695
3696 return 0;
3697 }
3698
3699 static void bnxt_free_port_stats(struct bnxt *bp)
3700 {
3701 struct pci_dev *pdev = bp->pdev;
3702
3703 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3704 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3705
3706 if (bp->hw_rx_port_stats) {
3707 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3708 bp->hw_rx_port_stats,
3709 bp->hw_rx_port_stats_map);
3710 bp->hw_rx_port_stats = NULL;
3711 }
3712
3713 if (bp->hw_tx_port_stats_ext) {
3714 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3715 bp->hw_tx_port_stats_ext,
3716 bp->hw_tx_port_stats_ext_map);
3717 bp->hw_tx_port_stats_ext = NULL;
3718 }
3719
3720 if (bp->hw_rx_port_stats_ext) {
3721 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3722 bp->hw_rx_port_stats_ext,
3723 bp->hw_rx_port_stats_ext_map);
3724 bp->hw_rx_port_stats_ext = NULL;
3725 }
3726
3727 if (bp->hw_pcie_stats) {
3728 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3729 bp->hw_pcie_stats, bp->hw_pcie_stats_map);
3730 bp->hw_pcie_stats = NULL;
3731 }
3732 }
3733
3734 static void bnxt_free_ring_stats(struct bnxt *bp)
3735 {
3736 struct pci_dev *pdev = bp->pdev;
3737 int size, i;
3738
3739 if (!bp->bnapi)
3740 return;
3741
3742 size = bp->hw_ring_stats_size;
3743
3744 for (i = 0; i < bp->cp_nr_rings; i++) {
3745 struct bnxt_napi *bnapi = bp->bnapi[i];
3746 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3747
3748 if (cpr->hw_stats) {
3749 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3750 cpr->hw_stats_map);
3751 cpr->hw_stats = NULL;
3752 }
3753 }
3754 }
3755
3756 static int bnxt_alloc_stats(struct bnxt *bp)
3757 {
3758 u32 size, i;
3759 struct pci_dev *pdev = bp->pdev;
3760
3761 size = bp->hw_ring_stats_size;
3762
3763 for (i = 0; i < bp->cp_nr_rings; i++) {
3764 struct bnxt_napi *bnapi = bp->bnapi[i];
3765 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3766
3767 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3768 &cpr->hw_stats_map,
3769 GFP_KERNEL);
3770 if (!cpr->hw_stats)
3771 return -ENOMEM;
3772
3773 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3774 }
3775
3776 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
3777 return 0;
3778
3779 if (bp->hw_rx_port_stats)
3780 goto alloc_ext_stats;
3781
3782 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3783 sizeof(struct tx_port_stats) + 1024;
3784
3785 bp->hw_rx_port_stats =
3786 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3787 &bp->hw_rx_port_stats_map,
3788 GFP_KERNEL);
3789 if (!bp->hw_rx_port_stats)
3790 return -ENOMEM;
3791
3792 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
3793 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3794 sizeof(struct rx_port_stats) + 512;
3795 bp->flags |= BNXT_FLAG_PORT_STATS;
3796
3797 alloc_ext_stats:
3798 /* Display extended statistics only if FW supports it */
3799 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
3800 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
3801 return 0;
3802
3803 if (bp->hw_rx_port_stats_ext)
3804 goto alloc_tx_ext_stats;
3805
3806 bp->hw_rx_port_stats_ext =
3807 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3808 &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
3809 if (!bp->hw_rx_port_stats_ext)
3810 return 0;
3811
3812 alloc_tx_ext_stats:
3813 if (bp->hw_tx_port_stats_ext)
3814 goto alloc_pcie_stats;
3815
3816 if (bp->hwrm_spec_code >= 0x10902 ||
3817 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
3818 bp->hw_tx_port_stats_ext =
3819 dma_alloc_coherent(&pdev->dev,
3820 sizeof(struct tx_port_stats_ext),
3821 &bp->hw_tx_port_stats_ext_map,
3822 GFP_KERNEL);
3823 }
3824 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3825
3826 alloc_pcie_stats:
3827 if (bp->hw_pcie_stats ||
3828 !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
3829 return 0;
3830
3831 bp->hw_pcie_stats =
3832 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3833 &bp->hw_pcie_stats_map, GFP_KERNEL);
3834 if (!bp->hw_pcie_stats)
3835 return 0;
3836
3837 bp->flags |= BNXT_FLAG_PCIE_STATS;
3838 return 0;
3839 }
3840
3841 static void bnxt_clear_ring_indices(struct bnxt *bp)
3842 {
3843 int i;
3844
3845 if (!bp->bnapi)
3846 return;
3847
3848 for (i = 0; i < bp->cp_nr_rings; i++) {
3849 struct bnxt_napi *bnapi = bp->bnapi[i];
3850 struct bnxt_cp_ring_info *cpr;
3851 struct bnxt_rx_ring_info *rxr;
3852 struct bnxt_tx_ring_info *txr;
3853
3854 if (!bnapi)
3855 continue;
3856
3857 cpr = &bnapi->cp_ring;
3858 cpr->cp_raw_cons = 0;
3859
3860 txr = bnapi->tx_ring;
3861 if (txr) {
3862 txr->tx_prod = 0;
3863 txr->tx_cons = 0;
3864 }
3865
3866 rxr = bnapi->rx_ring;
3867 if (rxr) {
3868 rxr->rx_prod = 0;
3869 rxr->rx_agg_prod = 0;
3870 rxr->rx_sw_agg_prod = 0;
3871 rxr->rx_next_cons = 0;
3872 }
3873 }
3874 }
3875
3876 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3877 {
3878 #ifdef CONFIG_RFS_ACCEL
3879 int i;
3880
3881 /* We are under rtnl_lock and all our NAPIs have been disabled.
3882 * It's safe to delete the hash table.
3883 */
3884 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3885 struct hlist_head *head;
3886 struct hlist_node *tmp;
3887 struct bnxt_ntuple_filter *fltr;
3888
3889 head = &bp->ntp_fltr_hash_tbl[i];
3890 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3891 hlist_del(&fltr->hash);
3892 kfree(fltr);
3893 }
3894 }
3895 if (irq_reinit) {
3896 kfree(bp->ntp_fltr_bmap);
3897 bp->ntp_fltr_bmap = NULL;
3898 }
3899 bp->ntp_fltr_count = 0;
3900 #endif
3901 }
3902
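/* Initialize the aRFS ntuple filter hash table and allocate the bitmap
 * used to hand out filter IDs (CONFIG_RFS_ACCEL only).
 */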
3903 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3904 {
3905 #ifdef CONFIG_RFS_ACCEL
3906 int i, rc = 0;
3907
3908 if (!(bp->flags & BNXT_FLAG_RFS))
3909 return 0;
3910
3911 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3912 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3913
3914 bp->ntp_fltr_count = 0;
3915 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3916 sizeof(long),
3917 GFP_KERNEL);
3918
3919 if (!bp->ntp_fltr_bmap)
3920 rc = -ENOMEM;
3921
3922 return rc;
3923 #else
3924 return 0;
3925 #endif
3926 }
3927
3928 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3929 {
3930 bnxt_free_vnic_attributes(bp);
3931 bnxt_free_tx_rings(bp);
3932 bnxt_free_rx_rings(bp);
3933 bnxt_free_cp_rings(bp);
3934 bnxt_free_ntp_fltrs(bp, irq_re_init);
3935 if (irq_re_init) {
3936 bnxt_free_ring_stats(bp);
3937 bnxt_free_ring_grps(bp);
3938 bnxt_free_vnics(bp);
3939 kfree(bp->tx_ring_map);
3940 bp->tx_ring_map = NULL;
3941 kfree(bp->tx_ring);
3942 bp->tx_ring = NULL;
3943 kfree(bp->rx_ring);
3944 bp->rx_ring = NULL;
3945 kfree(bp->bnapi);
3946 bp->bnapi = NULL;
3947 } else {
3948 bnxt_clear_ring_indices(bp);
3949 }
3950 }
3951
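/* Allocate all ring-related memory. With irq_re_init, the bnxt_napi
 * array, ring info arrays, stats, ntuple filters, and VNIC structures
 * are (re)allocated first; the ring buffers and VNIC attributes are
 * always allocated.
 */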
3952 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3953 {
3954 int i, j, rc, size, arr_size;
3955 void *bnapi;
3956
3957 if (irq_re_init) {
3958 /* Allocate bnapi mem pointer array and mem block for
3959 * all queues
3960 */
3961 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3962 bp->cp_nr_rings);
3963 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3964 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3965 if (!bnapi)
3966 return -ENOMEM;
3967
3968 bp->bnapi = bnapi;
3969 bnapi += arr_size;
3970 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3971 bp->bnapi[i] = bnapi;
3972 bp->bnapi[i]->index = i;
3973 bp->bnapi[i]->bp = bp;
3974 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3975 struct bnxt_cp_ring_info *cpr =
3976 &bp->bnapi[i]->cp_ring;
3977
3978 cpr->cp_ring_struct.ring_mem.flags =
3979 BNXT_RMEM_RING_PTE_FLAG;
3980 }
3981 }
3982
3983 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3984 sizeof(struct bnxt_rx_ring_info),
3985 GFP_KERNEL);
3986 if (!bp->rx_ring)
3987 return -ENOMEM;
3988
3989 for (i = 0; i < bp->rx_nr_rings; i++) {
3990 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3991
3992 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3993 rxr->rx_ring_struct.ring_mem.flags =
3994 BNXT_RMEM_RING_PTE_FLAG;
3995 rxr->rx_agg_ring_struct.ring_mem.flags =
3996 BNXT_RMEM_RING_PTE_FLAG;
3997 }
3998 rxr->bnapi = bp->bnapi[i];
3999 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4000 }
4001
4002 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4003 sizeof(struct bnxt_tx_ring_info),
4004 GFP_KERNEL);
4005 if (!bp->tx_ring)
4006 return -ENOMEM;
4007
4008 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4009 GFP_KERNEL);
4010
4011 if (!bp->tx_ring_map)
4012 return -ENOMEM;
4013
4014 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4015 j = 0;
4016 else
4017 j = bp->rx_nr_rings;
4018
4019 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4020 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4021
4022 if (bp->flags & BNXT_FLAG_CHIP_P5)
4023 txr->tx_ring_struct.ring_mem.flags =
4024 BNXT_RMEM_RING_PTE_FLAG;
4025 txr->bnapi = bp->bnapi[j];
4026 bp->bnapi[j]->tx_ring = txr;
4027 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4028 if (i >= bp->tx_nr_rings_xdp) {
4029 txr->txq_index = i - bp->tx_nr_rings_xdp;
4030 bp->bnapi[j]->tx_int = bnxt_tx_int;
4031 } else {
4032 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4033 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4034 }
4035 }
4036
4037 rc = bnxt_alloc_stats(bp);
4038 if (rc)
4039 goto alloc_mem_err;
4040
4041 rc = bnxt_alloc_ntp_fltrs(bp);
4042 if (rc)
4043 goto alloc_mem_err;
4044
4045 rc = bnxt_alloc_vnics(bp);
4046 if (rc)
4047 goto alloc_mem_err;
4048 }
4049
4050 bnxt_init_ring_struct(bp);
4051
4052 rc = bnxt_alloc_rx_rings(bp);
4053 if (rc)
4054 goto alloc_mem_err;
4055
4056 rc = bnxt_alloc_tx_rings(bp);
4057 if (rc)
4058 goto alloc_mem_err;
4059
4060 rc = bnxt_alloc_cp_rings(bp);
4061 if (rc)
4062 goto alloc_mem_err;
4063
4064 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4065 BNXT_VNIC_UCAST_FLAG;
4066 rc = bnxt_alloc_vnic_attributes(bp);
4067 if (rc)
4068 goto alloc_mem_err;
4069 return 0;
4070
4071 alloc_mem_err:
4072 bnxt_free_mem(bp, true);
4073 return rc;
4074 }
4075
4076 static void bnxt_disable_int(struct bnxt *bp)
4077 {
4078 int i;
4079
4080 if (!bp->bnapi)
4081 return;
4082
4083 for (i = 0; i < bp->cp_nr_rings; i++) {
4084 struct bnxt_napi *bnapi = bp->bnapi[i];
4085 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4086 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4087
4088 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4089 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4090 }
4091 }
4092
4093 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4094 {
4095 struct bnxt_napi *bnapi = bp->bnapi[n];
4096 struct bnxt_cp_ring_info *cpr;
4097
4098 cpr = &bnapi->cp_ring;
4099 return cpr->cp_ring_struct.map_idx;
4100 }
4101
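/* Disable completion ring interrupts and wait for any in-flight
 * handlers to finish on every vector.
 */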
4102 static void bnxt_disable_int_sync(struct bnxt *bp)
4103 {
4104 int i;
4105
4106 atomic_inc(&bp->intr_sem);
4107
4108 bnxt_disable_int(bp);
4109 for (i = 0; i < bp->cp_nr_rings; i++) {
4110 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4111
4112 synchronize_irq(bp->irq_tbl[map_idx].vector);
4113 }
4114 }
4115
4116 static void bnxt_enable_int(struct bnxt *bp)
4117 {
4118 int i;
4119
4120 atomic_set(&bp->intr_sem, 0);
4121 for (i = 0; i < bp->cp_nr_rings; i++) {
4122 struct bnxt_napi *bnapi = bp->bnapi[i];
4123 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4124
4125 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4126 }
4127 }
4128
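/* Fill in the common HWRM request header: request type, completion
 * ring, target function, and the DMA address where firmware writes the
 * response (Kong channel requests use the Kong response buffer).
 */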
4129 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4130 u16 cmpl_ring, u16 target_id)
4131 {
4132 struct input *req = request;
4133
4134 req->req_type = cpu_to_le16(req_type);
4135 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4136 req->target_id = cpu_to_le16(target_id);
4137 if (bnxt_kong_hwrm_message(bp, req))
4138 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4139 else
4140 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4141 }
4142
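/* Map HWRM firmware error codes to standard errno values. */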
4143 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4144 {
4145 switch (hwrm_err) {
4146 case HWRM_ERR_CODE_SUCCESS:
4147 return 0;
4148 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4149 return -EACCES;
4150 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4151 return -ENOSPC;
4152 case HWRM_ERR_CODE_INVALID_PARAMS:
4153 case HWRM_ERR_CODE_INVALID_FLAGS:
4154 case HWRM_ERR_CODE_INVALID_ENABLES:
4155 case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4156 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4157 return -EINVAL;
4158 case HWRM_ERR_CODE_NO_BUFFER:
4159 return -ENOMEM;
4160 case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4161 return -EAGAIN;
4162 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4163 return -EOPNOTSUPP;
4164 default:
4165 return -EIO;
4166 }
4167 }
4168
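/* Send one HWRM request and wait for the response. The request is
 * copied into the ChiMP (or Kong) communication window in BAR0 and the
 * doorbell is rung; oversized requests, or any request when the
 * short-cmd capability is set, are sent via the short format pointing
 * at a DMA buffer. Completion is detected either through the HWRM
 * completion interrupt or by polling the response length and valid
 * byte in the DMA response buffer.
 */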
4169 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4170 int timeout, bool silent)
4171 {
4172 int i, intr_process, rc, tmo_count;
4173 struct input *req = msg;
4174 u32 *data = msg;
4175 __le32 *resp_len;
4176 u8 *valid;
4177 u16 cp_ring_id, len = 0;
4178 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4179 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4180 struct hwrm_short_input short_input = {0};
4181 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4182 u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
4183 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4184 u16 dst = BNXT_HWRM_CHNL_CHIMP;
4185
4186 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4187 return -EBUSY;
4188
4189 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4190 if (msg_len > bp->hwrm_max_ext_req_len ||
4191 !bp->hwrm_short_cmd_req_addr)
4192 return -EINVAL;
4193 }
4194
4195 if (bnxt_hwrm_kong_chnl(bp, req)) {
4196 dst = BNXT_HWRM_CHNL_KONG;
4197 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4198 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4199 resp = bp->hwrm_cmd_kong_resp_addr;
4200 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
4201 }
4202
4203 memset(resp, 0, PAGE_SIZE);
4204 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4205 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4206
4207 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4208 /* currently supports only one outstanding message */
4209 if (intr_process)
4210 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4211
4212 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4213 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4214 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4215 u16 max_msg_len;
4216
4217 /* Set the boundary for the maximum extended request length for the
4218 * short cmd format. If passed up from the device, use the max
4219 * supported internal req length.
4220 */
4221 max_msg_len = bp->hwrm_max_ext_req_len;
4222
4223 memcpy(short_cmd_req, req, msg_len);
4224 if (msg_len < max_msg_len)
4225 memset(short_cmd_req + msg_len, 0,
4226 max_msg_len - msg_len);
4227
4228 short_input.req_type = req->req_type;
4229 short_input.signature =
4230 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4231 short_input.size = cpu_to_le16(msg_len);
4232 short_input.req_addr =
4233 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4234
4235 data = (u32 *)&short_input;
4236 msg_len = sizeof(short_input);
4237
4238 /* Sync memory write before updating doorbell */
4239 wmb();
4240
4241 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4242 }
4243
4244 /* Write request msg to hwrm channel */
4245 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4246
4247 for (i = msg_len; i < max_req_len; i += 4)
4248 writel(0, bp->bar0 + bar_offset + i);
4249
4250 /* Ring channel doorbell */
4251 writel(1, bp->bar0 + doorbell_offset);
4252
4253 if (!pci_is_enabled(bp->pdev))
4254 return 0;
4255
4256 if (!timeout)
4257 timeout = DFLT_HWRM_CMD_TIMEOUT;
4258 /* convert timeout to usec */
4259 timeout *= 1000;
4260
4261 i = 0;
4262 /* Short timeout for the first few iterations:
4263 * number of loops = number of loops for short timeout +
4264 * number of loops for standard timeout.
4265 */
4266 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4267 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4268 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
4269 resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
4270
4271 if (intr_process) {
4272 u16 seq_id = bp->hwrm_intr_seq_id;
4273
4274 /* Wait until hwrm response cmpl interrupt is processed */
4275 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4276 i++ < tmo_count) {
4277 /* on first few passes, just barely sleep */
4278 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4279 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4280 HWRM_SHORT_MAX_TIMEOUT);
4281 else
4282 usleep_range(HWRM_MIN_TIMEOUT,
4283 HWRM_MAX_TIMEOUT);
4284 }
4285
4286 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4287 if (!silent)
4288 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4289 le16_to_cpu(req->req_type));
4290 return -EBUSY;
4291 }
4292 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4293 HWRM_RESP_LEN_SFT;
4294 valid = resp_addr + len - 1;
4295 } else {
4296 int j;
4297
4298 /* Check if response len is updated */
4299 for (i = 0; i < tmo_count; i++) {
4300 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4301 HWRM_RESP_LEN_SFT;
4302 if (len)
4303 break;
4304 /* on first few passes, just barely sleep */
4305 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4306 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4307 HWRM_SHORT_MAX_TIMEOUT);
4308 else
4309 usleep_range(HWRM_MIN_TIMEOUT,
4310 HWRM_MAX_TIMEOUT);
4311 }
4312
4313 if (i >= tmo_count) {
4314 if (!silent)
4315 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4316 HWRM_TOTAL_TIMEOUT(i),
4317 le16_to_cpu(req->req_type),
4318 le16_to_cpu(req->seq_id), len);
4319 return -EBUSY;
4320 }
4321
4322 /* Last byte of resp contains valid bit */
4323 valid = resp_addr + len - 1;
4324 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4325 /* make sure we read from updated DMA memory */
4326 dma_rmb();
4327 if (*valid)
4328 break;
4329 usleep_range(1, 5);
4330 }
4331
4332 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4333 if (!silent)
4334 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4335 HWRM_TOTAL_TIMEOUT(i),
4336 le16_to_cpu(req->req_type),
4337 le16_to_cpu(req->seq_id), len,
4338 *valid);
4339 return -EBUSY;
4340 }
4341 }
4342
4343 /* Zero the valid bit for compatibility. The valid bit in an older
4344 * spec may become a new field in a newer spec. We must make sure
4345 * that a new field not implemented by the old spec reads zero.
4346 */
4347 *valid = 0;
4348 rc = le16_to_cpu(resp->error_code);
4349 if (rc && !silent)
4350 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4351 le16_to_cpu(resp->req_type),
4352 le16_to_cpu(resp->seq_id), rc);
4353 return bnxt_hwrm_to_stderr(rc);
4354 }
4355
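/* The _hwrm_send_message() variants require the caller to hold
 * bp->hwrm_cmd_lock; hwrm_send_message() and hwrm_send_message_silent()
 * take the lock themselves. The silent variants suppress error logging.
 */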
4356 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4357 {
4358 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4359 }
4360
4361 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4362 int timeout)
4363 {
4364 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4365 }
4366
4367 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4368 {
4369 int rc;
4370
4371 mutex_lock(&bp->hwrm_cmd_lock);
4372 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4373 mutex_unlock(&bp->hwrm_cmd_lock);
4374 return rc;
4375 }
4376
4377 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4378 int timeout)
4379 {
4380 int rc;
4381
4382 mutex_lock(&bp->hwrm_cmd_lock);
4383 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4384 mutex_unlock(&bp->hwrm_cmd_lock);
4385 return rc;
4386 }
4387
4388 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
4389 int bmap_size)
4390 {
4391 struct hwrm_func_drv_rgtr_input req = {0};
4392 DECLARE_BITMAP(async_events_bmap, 256);
4393 u32 *events = (u32 *)async_events_bmap;
4394 int i;
4395
4396 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4397
4398 req.enables =
4399 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4400
4401 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4402 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4403 u16 event_id = bnxt_async_events_arr[i];
4404
4405 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4406 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4407 continue;
4408 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4409 }
4410 if (bmap && bmap_size) {
4411 for (i = 0; i < bmap_size; i++) {
4412 if (test_bit(i, bmap))
4413 __set_bit(i, async_events_bmap);
4414 }
4415 }
4416
4417 for (i = 0; i < 8; i++)
4418 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4419
4420 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4421 }
4422
4423 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
4424 {
4425 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4426 struct hwrm_func_drv_rgtr_input req = {0};
4427 u32 flags;
4428 int rc;
4429
4430 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4431
4432 req.enables =
4433 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4434 FUNC_DRV_RGTR_REQ_ENABLES_VER);
4435
4436 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4437 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
4438 FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4439 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4440 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT;
4441 req.flags = cpu_to_le32(flags);
4442 req.ver_maj_8b = DRV_VER_MAJ;
4443 req.ver_min_8b = DRV_VER_MIN;
4444 req.ver_upd_8b = DRV_VER_UPD;
4445 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4446 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4447 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4448
4449 if (BNXT_PF(bp)) {
4450 u32 data[8];
4451 int i;
4452
4453 memset(data, 0, sizeof(data));
4454 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4455 u16 cmd = bnxt_vf_req_snif[i];
4456 unsigned int bit, idx;
4457
4458 idx = cmd / 32;
4459 bit = cmd % 32;
4460 data[idx] |= 1 << bit;
4461 }
4462
4463 for (i = 0; i < 8; i++)
4464 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4465
4466 req.enables |=
4467 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4468 }
4469
4470 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4471 req.flags |= cpu_to_le32(
4472 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4473
4474 mutex_lock(&bp->hwrm_cmd_lock);
4475 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4476 if (!rc && (resp->flags &
4477 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)))
4478 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4479 mutex_unlock(&bp->hwrm_cmd_lock);
4480 return rc;
4481 }
4482
4483 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4484 {
4485 struct hwrm_func_drv_unrgtr_input req = {0};
4486
4487 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4488 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4489 }
4490
4491 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4492 {
4493 int rc = 0;
4494 struct hwrm_tunnel_dst_port_free_input req = {0};
4495
4496 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4497 req.tunnel_type = tunnel_type;
4498
4499 switch (tunnel_type) {
4500 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4501 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4502 break;
4503 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4504 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4505 break;
4506 default:
4507 break;
4508 }
4509
4510 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4511 if (rc)
4512 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4513 rc);
4514 return rc;
4515 }
4516
4517 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4518 u8 tunnel_type)
4519 {
4520 int rc = 0;
4521 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4522 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4523
4524 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4525
4526 req.tunnel_type = tunnel_type;
4527 req.tunnel_dst_port_val = port;
4528
4529 mutex_lock(&bp->hwrm_cmd_lock);
4530 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4531 if (rc) {
4532 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4533 rc);
4534 goto err_out;
4535 }
4536
4537 switch (tunnel_type) {
4538 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4539 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
4540 break;
4541 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4542 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
4543 break;
4544 default:
4545 break;
4546 }
4547
4548 err_out:
4549 mutex_unlock(&bp->hwrm_cmd_lock);
4550 return rc;
4551 }
4552
4553 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4554 {
4555 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4556 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4557
4558 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4559 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4560
4561 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4562 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4563 req.mask = cpu_to_le32(vnic->rx_mask);
4564 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4565 }
4566
4567 #ifdef CONFIG_RFS_ACCEL
4568 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4569 struct bnxt_ntuple_filter *fltr)
4570 {
4571 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4572
4573 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4574 req.ntuple_filter_id = fltr->filter_id;
4575 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4576 }
4577
4578 #define BNXT_NTP_FLTR_FLAGS \
4579 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4580 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4581 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4582 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4583 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4584 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4585 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4586 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4587 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4588 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4589 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4590 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4591 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
4592 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4593
4594 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
4595 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4596
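/* Program one aRFS ntuple filter matching the flow's addresses, ports,
 * and protocol, steering it either to an RSS ring table index (when the
 * firmware supports it) or to the VNIC mapped to the target RX queue.
 */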
4597 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4598 struct bnxt_ntuple_filter *fltr)
4599 {
4600 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4601 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4602 struct flow_keys *keys = &fltr->fkeys;
4603 struct bnxt_vnic_info *vnic;
4604 u32 dst_ena = 0;
4605 int rc = 0;
4606
4607 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4608 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4609
4610 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) {
4611 dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
4612 req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
4613 vnic = &bp->vnic_info[0];
4614 } else {
4615 vnic = &bp->vnic_info[fltr->rxq + 1];
4616 }
4617 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4618 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
4619
4620 req.ethertype = htons(ETH_P_IP);
4621 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4622 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4623 req.ip_protocol = keys->basic.ip_proto;
4624
4625 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4626 int i;
4627
4628 req.ethertype = htons(ETH_P_IPV6);
4629 req.ip_addr_type =
4630 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4631 *(struct in6_addr *)&req.src_ipaddr[0] =
4632 keys->addrs.v6addrs.src;
4633 *(struct in6_addr *)&req.dst_ipaddr[0] =
4634 keys->addrs.v6addrs.dst;
4635 for (i = 0; i < 4; i++) {
4636 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4637 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4638 }
4639 } else {
4640 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4641 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4642 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4643 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4644 }
4645 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4646 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4647 req.tunnel_type =
4648 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4649 }
4650
4651 req.src_port = keys->ports.src;
4652 req.src_port_mask = cpu_to_be16(0xffff);
4653 req.dst_port = keys->ports.dst;
4654 req.dst_port_mask = cpu_to_be16(0xffff);
4655
4656 mutex_lock(&bp->hwrm_cmd_lock);
4657 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4658 if (!rc) {
4659 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4660 fltr->filter_id = resp->ntuple_filter_id;
4661 }
4662 mutex_unlock(&bp->hwrm_cmd_lock);
4663 return rc;
4664 }
4665 #endif
4666
4667 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4668 u8 *mac_addr)
4669 {
4670 int rc = 0;
4671 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4672 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4673
4674 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4675 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4676 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4677 req.flags |=
4678 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4679 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4680 req.enables =
4681 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4682 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4683 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4684 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4685 req.l2_addr_mask[0] = 0xff;
4686 req.l2_addr_mask[1] = 0xff;
4687 req.l2_addr_mask[2] = 0xff;
4688 req.l2_addr_mask[3] = 0xff;
4689 req.l2_addr_mask[4] = 0xff;
4690 req.l2_addr_mask[5] = 0xff;
4691
4692 mutex_lock(&bp->hwrm_cmd_lock);
4693 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4694 if (!rc)
4695 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4696 resp->l2_filter_id;
4697 mutex_unlock(&bp->hwrm_cmd_lock);
4698 return rc;
4699 }
4700
4701 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4702 {
4703 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4704 int rc = 0;
4705
4706 /* Any associated ntuple filters will also be cleared by firmware. */
4707 mutex_lock(&bp->hwrm_cmd_lock);
4708 for (i = 0; i < num_of_vnics; i++) {
4709 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4710
4711 for (j = 0; j < vnic->uc_filter_count; j++) {
4712 struct hwrm_cfa_l2_filter_free_input req = {0};
4713
4714 bnxt_hwrm_cmd_hdr_init(bp, &req,
4715 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4716
4717 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4718
4719 rc = _hwrm_send_message(bp, &req, sizeof(req),
4720 HWRM_CMD_TIMEOUT);
4721 }
4722 vnic->uc_filter_count = 0;
4723 }
4724 mutex_unlock(&bp->hwrm_cmd_lock);
4725
4726 return rc;
4727 }
4728
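/* Configure TPA (hardware RX aggregation, including GRO mode) on a
 * VNIC. The maximum aggregated segment count is derived from the MTU
 * and the RX page size, and on pre-P5 chips is passed to firmware in
 * log2 units.
 */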
4729 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4730 {
4731 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4732 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4733 struct hwrm_vnic_tpa_cfg_input req = {0};
4734
4735 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4736 return 0;
4737
4738 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4739
4740 if (tpa_flags) {
4741 u16 mss = bp->dev->mtu - 40;
4742 u32 nsegs, n, segs = 0, flags;
4743
4744 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4745 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4746 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4747 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4748 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4749 if (tpa_flags & BNXT_FLAG_GRO)
4750 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4751
4752 req.flags = cpu_to_le32(flags);
4753
4754 req.enables =
4755 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4756 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4757 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4758
4759 /* The number of segs is in log2 units, and the first packet is
4760 * not included in these units.
4761 */
4762 if (mss <= BNXT_RX_PAGE_SIZE) {
4763 n = BNXT_RX_PAGE_SIZE / mss;
4764 nsegs = (MAX_SKB_FRAGS - 1) * n;
4765 } else {
4766 n = mss / BNXT_RX_PAGE_SIZE;
4767 if (mss & (BNXT_RX_PAGE_SIZE - 1))
4768 n++;
4769 nsegs = (MAX_SKB_FRAGS - n) / n;
4770 }
4771
4772 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4773 segs = MAX_TPA_SEGS_P5;
4774 max_aggs = bp->max_tpa;
4775 } else {
4776 segs = ilog2(nsegs);
4777 }
4778 req.max_agg_segs = cpu_to_le16(segs);
4779 req.max_aggs = cpu_to_le16(max_aggs);
4780
4781 req.min_agg_len = cpu_to_le32(512);
4782 }
4783 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4784
4785 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4786 }
4787
4788 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4789 {
4790 struct bnxt_ring_grp_info *grp_info;
4791
4792 grp_info = &bp->grp_info[ring->grp_idx];
4793 return grp_info->cp_fw_ring_id;
4794 }
4795
4796 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4797 {
4798 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4799 struct bnxt_napi *bnapi = rxr->bnapi;
4800 struct bnxt_cp_ring_info *cpr;
4801
4802 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4803 return cpr->cp_ring_struct.fw_ring_id;
4804 } else {
4805 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4806 }
4807 }
4808
4809 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4810 {
4811 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4812 struct bnxt_napi *bnapi = txr->bnapi;
4813 struct bnxt_cp_ring_info *cpr;
4814
4815 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4816 return cpr->cp_ring_struct.fw_ring_id;
4817 } else {
4818 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4819 }
4820 }
4821
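/* Program the RSS hash type, indirection table, and hash key for a
 * VNIC on pre-P5 chips. The indirection table entries are firmware
 * ring group IDs, assigned round-robin across the VNIC's RX rings.
 */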
4822 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4823 {
4824 u32 i, j, max_rings;
4825 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4826 struct hwrm_vnic_rss_cfg_input req = {0};
4827
4828 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4829 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
4830 return 0;
4831
4832 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4833 if (set_rss) {
4834 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4835 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4836 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4837 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4838 max_rings = bp->rx_nr_rings - 1;
4839 else
4840 max_rings = bp->rx_nr_rings;
4841 } else {
4842 max_rings = 1;
4843 }
4844
4845 /* Fill the RSS indirection table with ring group ids */
4846 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4847 if (j == max_rings)
4848 j = 0;
4849 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4850 }
4851
4852 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4853 req.hash_key_tbl_addr =
4854 cpu_to_le64(vnic->rss_hash_key_dma_addr);
4855 }
4856 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4857 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4858 }
4859
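/* P5 chips use ring table pairs instead of ring groups: each RSS
 * context covers 64 (RX ring, companion completion ring) entries, so
 * the table is programmed in chunks of 64 across
 * DIV_ROUND_UP(rx_nr_rings, 64) contexts.
 */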
4860 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4861 {
4862 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4863 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4864 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4865 struct hwrm_vnic_rss_cfg_input req = {0};
4866
4867 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4868 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4869 if (!set_rss) {
4870 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4871 return 0;
4872 }
4873 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4874 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4875 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4876 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4877 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4878 for (i = 0, k = 0; i < nr_ctxs; i++) {
4879 __le16 *ring_tbl = vnic->rss_table;
4880 int rc;
4881
4882 req.ring_table_pair_index = i;
4883 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4884 for (j = 0; j < 64; j++) {
4885 u16 ring_id;
4886
4887 ring_id = rxr->rx_ring_struct.fw_ring_id;
4888 *ring_tbl++ = cpu_to_le16(ring_id);
4889 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4890 *ring_tbl++ = cpu_to_le16(ring_id);
4891 rxr++;
4892 k++;
4893 if (k == max_rings) {
4894 k = 0;
4895 rxr = &bp->rx_ring[0];
4896 }
4897 }
4898 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4899 if (rc)
4900 return rc;
4901 }
4902 return 0;
4903 }
4904
4905 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4906 {
4907 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4908 struct hwrm_vnic_plcmodes_cfg_input req = {0};
4909
4910 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4911 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4912 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4913 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4914 req.enables =
4915 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4916 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4917 /* thresholds not implemented in firmware yet */
4918 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4919 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4920 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4921 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4922 }
4923
4924 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4925 u16 ctx_idx)
4926 {
4927 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4928
4929 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4930 req.rss_cos_lb_ctx_id =
4931 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
4932
4933 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4934 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
4935 }
4936
4937 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4938 {
4939 int i, j;
4940
4941 for (i = 0; i < bp->nr_vnics; i++) {
4942 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4943
4944 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4945 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4946 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4947 }
4948 }
4949 bp->rsscos_nr_ctxs = 0;
4950 }
4951
4952 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
4953 {
4954 int rc;
4955 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4956 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4957 bp->hwrm_cmd_resp_addr;
4958
4959 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4960 -1);
4961
4962 mutex_lock(&bp->hwrm_cmd_lock);
4963 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4964 if (!rc)
4965 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
4966 le16_to_cpu(resp->rss_cos_lb_ctx_id);
4967 mutex_unlock(&bp->hwrm_cmd_lock);
4968
4969 return rc;
4970 }
4971
4972 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4973 {
4974 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4975 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4976 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4977 }
4978
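/* Configure a VNIC: its default ring group (or default RX and
 * completion rings on P5 chips), RSS and COS rules, MRU, and VLAN
 * stripping mode.
 */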
4979 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
4980 {
4981 unsigned int ring = 0, grp_idx;
4982 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4983 struct hwrm_vnic_cfg_input req = {0};
4984 u16 def_vlan = 0;
4985
4986 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
4987
4988 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4989 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4990
4991 req.default_rx_ring_id =
4992 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
4993 req.default_cmpl_ring_id =
4994 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
4995 req.enables =
4996 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
4997 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
4998 goto vnic_mru;
4999 }
5000 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5001 /* Only RSS supported for now. TBD: COS & LB */
5002 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5003 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5004 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5005 VNIC_CFG_REQ_ENABLES_MRU);
5006 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5007 req.rss_rule =
5008 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5009 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5010 VNIC_CFG_REQ_ENABLES_MRU);
5011 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5012 } else {
5013 req.rss_rule = cpu_to_le16(0xffff);
5014 }
5015
5016 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5017 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5018 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5019 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5020 } else {
5021 req.cos_rule = cpu_to_le16(0xffff);
5022 }
5023
5024 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5025 ring = 0;
5026 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5027 ring = vnic_id - 1;
5028 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5029 ring = bp->rx_nr_rings - 1;
5030
5031 grp_idx = bp->rx_ring[ring].bnapi->index;
5032 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5033 req.lb_rule = cpu_to_le16(0xffff);
5034 vnic_mru:
5035 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
5036 VLAN_HLEN);
5037
5038 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5039 #ifdef CONFIG_BNXT_SRIOV
5040 if (BNXT_VF(bp))
5041 def_vlan = bp->vf.vlan;
5042 #endif
5043 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5044 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5045 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5046 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5047
5048 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5049 }
5050
5051 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5052 {
5053 int rc = 0;
5054
5055 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5056 struct hwrm_vnic_free_input req = {0};
5057
5058 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5059 req.vnic_id =
5060 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5061
5062 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5063 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5064 }
5065 return rc;
5066 }
5067
5068 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5069 {
5070 u16 i;
5071
5072 for (i = 0; i < bp->nr_vnics; i++)
5073 bnxt_hwrm_vnic_free_one(bp, i);
5074 }
5075
5076 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5077 unsigned int start_rx_ring_idx,
5078 unsigned int nr_rings)
5079 {
5080 int rc = 0;
5081 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5082 struct hwrm_vnic_alloc_input req = {0};
5083 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5084 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5085
5086 if (bp->flags & BNXT_FLAG_CHIP_P5)
5087 goto vnic_no_ring_grps;
5088
5089 /* map ring groups to this vnic */
5090 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5091 grp_idx = bp->rx_ring[i].bnapi->index;
5092 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5093 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5094 j, nr_rings);
5095 break;
5096 }
5097 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5098 }
5099
5100 vnic_no_ring_grps:
5101 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5102 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5103 if (vnic_id == 0)
5104 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5105
5106 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5107
5108 mutex_lock(&bp->hwrm_cmd_lock);
5109 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5110 if (!rc)
5111 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5112 mutex_unlock(&bp->hwrm_cmd_lock);
5113 return rc;
5114 }
5115
5116 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5117 {
5118 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5119 struct hwrm_vnic_qcaps_input req = {0};
5120 int rc;
5121
5122 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5123 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5124 if (bp->hwrm_spec_code < 0x10600)
5125 return 0;
5126
5127 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5128 mutex_lock(&bp->hwrm_cmd_lock);
5129 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5130 if (!rc) {
5131 u32 flags = le32_to_cpu(resp->flags);
5132
5133 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5134 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5135 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5136 if (flags &
5137 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5138 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5139 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5140 if (bp->max_tpa_v2)
5141 bp->hw_ring_stats_size =
5142 sizeof(struct ctx_hw_stats_ext);
5143 }
5144 mutex_unlock(&bp->hwrm_cmd_lock);
5145 return rc;
5146 }
5147
5148 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5149 {
5150 u16 i;
5151 int rc = 0;
5152
5153 if (bp->flags & BNXT_FLAG_CHIP_P5)
5154 return 0;
5155
5156 mutex_lock(&bp->hwrm_cmd_lock);
5157 for (i = 0; i < bp->rx_nr_rings; i++) {
5158 struct hwrm_ring_grp_alloc_input req = {0};
5159 struct hwrm_ring_grp_alloc_output *resp =
5160 bp->hwrm_cmd_resp_addr;
5161 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5162
5163 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5164
5165 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5166 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5167 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5168 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5169
5170 rc = _hwrm_send_message(bp, &req, sizeof(req),
5171 HWRM_CMD_TIMEOUT);
5172 if (rc)
5173 break;
5174
5175 bp->grp_info[grp_idx].fw_grp_id =
5176 le32_to_cpu(resp->ring_group_id);
5177 }
5178 mutex_unlock(&bp->hwrm_cmd_lock);
5179 return rc;
5180 }
5181
5182 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5183 {
5184 u16 i;
5185 int rc = 0;
5186 struct hwrm_ring_grp_free_input req = {0};
5187
5188 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5189 return 0;
5190
5191 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5192
5193 mutex_lock(&bp->hwrm_cmd_lock);
5194 for (i = 0; i < bp->cp_nr_rings; i++) {
5195 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5196 continue;
5197 req.ring_group_id =
5198 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5199
5200 rc = _hwrm_send_message(bp, &req, sizeof(req),
5201 HWRM_CMD_TIMEOUT);
5202 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5203 }
5204 mutex_unlock(&bp->hwrm_cmd_lock);
5205 return rc;
5206 }
5207
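/* Allocate one hardware ring of the given type via HWRM_RING_ALLOC,
 * pointing firmware at the ring's page table for multi-page rings, and
 * record the returned firmware ring ID.
 */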
5208 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5209 struct bnxt_ring_struct *ring,
5210 u32 ring_type, u32 map_index)
5211 {
5212 int rc = 0, err = 0;
5213 struct hwrm_ring_alloc_input req = {0};
5214 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5215 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5216 struct bnxt_ring_grp_info *grp_info;
5217 u16 ring_id;
5218
5219 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5220
5221 req.enables = 0;
5222 if (rmem->nr_pages > 1) {
5223 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5224 /* Page size is in log2 units */
5225 req.page_size = BNXT_PAGE_SHIFT;
5226 req.page_tbl_depth = 1;
5227 } else {
5228 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5229 }
5230 req.fbo = 0;
5231 /* Association of ring index with doorbell index and MSIX number */
5232 req.logical_id = cpu_to_le16(map_index);
5233
5234 switch (ring_type) {
5235 case HWRM_RING_ALLOC_TX: {
5236 struct bnxt_tx_ring_info *txr;
5237
5238 txr = container_of(ring, struct bnxt_tx_ring_info,
5239 tx_ring_struct);
5240 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5241 /* Association of transmit ring with completion ring */
5242 grp_info = &bp->grp_info[ring->grp_idx];
5243 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5244 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5245 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5246 req.queue_id = cpu_to_le16(ring->queue_id);
5247 break;
5248 }
5249 case HWRM_RING_ALLOC_RX:
5250 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5251 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5252 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5253 u16 flags = 0;
5254
5255 /* Association of rx ring with stats context */
5256 grp_info = &bp->grp_info[ring->grp_idx];
5257 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5258 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5259 req.enables |= cpu_to_le32(
5260 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5261 if (NET_IP_ALIGN == 2)
5262 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5263 req.flags = cpu_to_le16(flags);
5264 }
5265 break;
5266 case HWRM_RING_ALLOC_AGG:
5267 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5268 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5269 /* Association of agg ring with rx ring */
5270 grp_info = &bp->grp_info[ring->grp_idx];
5271 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5272 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5273 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5274 req.enables |= cpu_to_le32(
5275 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5276 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5277 } else {
5278 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5279 }
5280 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5281 break;
5282 case HWRM_RING_ALLOC_CMPL:
5283 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5284 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5285 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5286 /* Association of cp ring with nq */
5287 grp_info = &bp->grp_info[map_index];
5288 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5289 req.cq_handle = cpu_to_le64(ring->handle);
5290 req.enables |= cpu_to_le32(
5291 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5292 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5293 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5294 }
5295 break;
5296 case HWRM_RING_ALLOC_NQ:
5297 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5298 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5299 if (bp->flags & BNXT_FLAG_USING_MSIX)
5300 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5301 break;
5302 default:
5303 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5304 ring_type);
5305 return -EINVAL;
5306 }
5307
5308 mutex_lock(&bp->hwrm_cmd_lock);
5309 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5310 err = le16_to_cpu(resp->error_code);
5311 ring_id = le16_to_cpu(resp->ring_id);
5312 mutex_unlock(&bp->hwrm_cmd_lock);
5313
5314 if (rc || err) {
5315 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5316 ring_type, rc, err);
5317 return -EIO;
5318 }
5319 ring->fw_ring_id = ring_id;
5320 return rc;
5321 }
5322
5323 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5324 {
5325 int rc;
5326
5327 if (BNXT_PF(bp)) {
5328 struct hwrm_func_cfg_input req = {0};
5329
5330 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5331 req.fid = cpu_to_le16(0xffff);
5332 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5333 req.async_event_cr = cpu_to_le16(idx);
5334 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5335 } else {
5336 struct hwrm_func_vf_cfg_input req = {0};
5337
5338 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5339 req.enables =
5340 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5341 req.async_event_cr = cpu_to_le16(idx);
5342 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5343 }
5344 return rc;
5345 }
5346
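/* Compute the doorbell address and key for a ring. P5 chips use 64-bit
 * doorbells at a fixed BAR1 offset (different for PF and VF) keyed by
 * ring type and XID; older chips use a 32-bit doorbell page per map
 * index.
 */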
5347 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5348 u32 map_idx, u32 xid)
5349 {
5350 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5351 if (BNXT_PF(bp))
5352 db->doorbell = bp->bar1 + 0x10000;
5353 else
5354 db->doorbell = bp->bar1 + 0x4000;
5355 switch (ring_type) {
5356 case HWRM_RING_ALLOC_TX:
5357 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5358 break;
5359 case HWRM_RING_ALLOC_RX:
5360 case HWRM_RING_ALLOC_AGG:
5361 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5362 break;
5363 case HWRM_RING_ALLOC_CMPL:
5364 db->db_key64 = DBR_PATH_L2;
5365 break;
5366 case HWRM_RING_ALLOC_NQ:
5367 db->db_key64 = DBR_PATH_L2;
5368 break;
5369 }
5370 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5371 } else {
5372 db->doorbell = bp->bar1 + map_idx * 0x80;
5373 switch (ring_type) {
5374 case HWRM_RING_ALLOC_TX:
5375 db->db_key32 = DB_KEY_TX;
5376 break;
5377 case HWRM_RING_ALLOC_RX:
5378 case HWRM_RING_ALLOC_AGG:
5379 db->db_key32 = DB_KEY_RX;
5380 break;
5381 case HWRM_RING_ALLOC_CMPL:
5382 db->db_key32 = DB_KEY_CP;
5383 break;
5384 }
5385 }
5386 }
5387
5388 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5389 {
5390 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5391 int i, rc = 0;
5392 u32 type;
5393
5394 if (bp->flags & BNXT_FLAG_CHIP_P5)
5395 type = HWRM_RING_ALLOC_NQ;
5396 else
5397 type = HWRM_RING_ALLOC_CMPL;
5398 for (i = 0; i < bp->cp_nr_rings; i++) {
5399 struct bnxt_napi *bnapi = bp->bnapi[i];
5400 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5401 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5402 u32 map_idx = ring->map_idx;
5403 unsigned int vector;
5404
5405 vector = bp->irq_tbl[map_idx].vector;
5406 disable_irq_nosync(vector);
5407 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5408 if (rc) {
5409 enable_irq(vector);
5410 goto err_out;
5411 }
5412 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5413 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5414 enable_irq(vector);
5415 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5416
5417 if (!i) {
5418 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5419 if (rc)
5420 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5421 }
5422 }
5423
5424 type = HWRM_RING_ALLOC_TX;
5425 for (i = 0; i < bp->tx_nr_rings; i++) {
5426 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5427 struct bnxt_ring_struct *ring;
5428 u32 map_idx;
5429
5430 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5431 struct bnxt_napi *bnapi = txr->bnapi;
5432 struct bnxt_cp_ring_info *cpr, *cpr2;
5433 u32 type2 = HWRM_RING_ALLOC_CMPL;
5434
5435 cpr = &bnapi->cp_ring;
5436 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5437 ring = &cpr2->cp_ring_struct;
5438 ring->handle = BNXT_TX_HDL;
5439 map_idx = bnapi->index;
5440 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5441 if (rc)
5442 goto err_out;
5443 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5444 ring->fw_ring_id);
5445 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5446 }
5447 ring = &txr->tx_ring_struct;
5448 map_idx = i;
5449 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5450 if (rc)
5451 goto err_out;
5452 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5453 }
5454
5455 type = HWRM_RING_ALLOC_RX;
5456 for (i = 0; i < bp->rx_nr_rings; i++) {
5457 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5458 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5459 struct bnxt_napi *bnapi = rxr->bnapi;
5460 u32 map_idx = bnapi->index;
5461
5462 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5463 if (rc)
5464 goto err_out;
5465 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5466 /* If we have agg rings, post agg buffers first. */
5467 if (!agg_rings)
5468 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5469 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5470 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5471 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5472 u32 type2 = HWRM_RING_ALLOC_CMPL;
5473 struct bnxt_cp_ring_info *cpr2;
5474
5475 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5476 ring = &cpr2->cp_ring_struct;
5477 ring->handle = BNXT_RX_HDL;
5478 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5479 if (rc)
5480 goto err_out;
5481 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5482 ring->fw_ring_id);
5483 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5484 }
5485 }
5486
5487 if (agg_rings) {
5488 type = HWRM_RING_ALLOC_AGG;
5489 for (i = 0; i < bp->rx_nr_rings; i++) {
5490 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5491 struct bnxt_ring_struct *ring =
5492 &rxr->rx_agg_ring_struct;
5493 u32 grp_idx = ring->grp_idx;
5494 u32 map_idx = grp_idx + bp->rx_nr_rings;
5495
5496 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5497 if (rc)
5498 goto err_out;
5499
5500 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5501 ring->fw_ring_id);
5502 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5503 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5504 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5505 }
5506 }
5507 err_out:
5508 return rc;
5509 }
5510
5511 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5512 struct bnxt_ring_struct *ring,
5513 u32 ring_type, int cmpl_ring_id)
5514 {
5515 int rc;
5516 struct hwrm_ring_free_input req = {0};
5517 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5518 u16 error_code;
5519
5520 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
5521 return 0;
5522
5523 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5524 req.ring_type = ring_type;
5525 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5526
5527 mutex_lock(&bp->hwrm_cmd_lock);
5528 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5529 error_code = le16_to_cpu(resp->error_code);
5530 mutex_unlock(&bp->hwrm_cmd_lock);
5531
5532 if (rc || error_code) {
5533 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5534 ring_type, rc, error_code);
5535 return -EIO;
5536 }
5537 return 0;
5538 }
5539
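/* Free all hardware rings: TX, RX, and aggregation rings first, then
 * the completion/notification rings. Interrupts are disabled before
 * the completion rings are freed because the IRQ doorbells stop
 * working at that point.
 */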
5540 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5541 {
5542 u32 type;
5543 int i;
5544
5545 if (!bp->bnapi)
5546 return;
5547
5548 for (i = 0; i < bp->tx_nr_rings; i++) {
5549 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5550 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5551
5552 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5553 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5554
5555 hwrm_ring_free_send_msg(bp, ring,
5556 RING_FREE_REQ_RING_TYPE_TX,
5557 close_path ? cmpl_ring_id :
5558 INVALID_HW_RING_ID);
5559 ring->fw_ring_id = INVALID_HW_RING_ID;
5560 }
5561 }
5562
5563 for (i = 0; i < bp->rx_nr_rings; i++) {
5564 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5565 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5566 u32 grp_idx = rxr->bnapi->index;
5567
5568 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5569 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5570
5571 hwrm_ring_free_send_msg(bp, ring,
5572 RING_FREE_REQ_RING_TYPE_RX,
5573 close_path ? cmpl_ring_id :
5574 INVALID_HW_RING_ID);
5575 ring->fw_ring_id = INVALID_HW_RING_ID;
5576 bp->grp_info[grp_idx].rx_fw_ring_id =
5577 INVALID_HW_RING_ID;
5578 }
5579 }
5580
5581 if (bp->flags & BNXT_FLAG_CHIP_P5)
5582 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5583 else
5584 type = RING_FREE_REQ_RING_TYPE_RX;
5585 for (i = 0; i < bp->rx_nr_rings; i++) {
5586 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5587 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5588 u32 grp_idx = rxr->bnapi->index;
5589
5590 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5591 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5592
5593 hwrm_ring_free_send_msg(bp, ring, type,
5594 close_path ? cmpl_ring_id :
5595 INVALID_HW_RING_ID);
5596 ring->fw_ring_id = INVALID_HW_RING_ID;
5597 bp->grp_info[grp_idx].agg_fw_ring_id =
5598 INVALID_HW_RING_ID;
5599 }
5600 }
5601
5602 /* The completion rings are about to be freed. After that, the
5603 * IRQ doorbell will no longer work, so we need to disable the
5604 * IRQs here.
5605 */
5606 bnxt_disable_int_sync(bp);
5607
5608 if (bp->flags & BNXT_FLAG_CHIP_P5)
5609 type = RING_FREE_REQ_RING_TYPE_NQ;
5610 else
5611 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5612 for (i = 0; i < bp->cp_nr_rings; i++) {
5613 struct bnxt_napi *bnapi = bp->bnapi[i];
5614 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5615 struct bnxt_ring_struct *ring;
5616 int j;
5617
5618 for (j = 0; j < 2; j++) {
5619 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5620
5621 if (cpr2) {
5622 ring = &cpr2->cp_ring_struct;
5623 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5624 continue;
5625 hwrm_ring_free_send_msg(bp, ring,
5626 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5627 INVALID_HW_RING_ID);
5628 ring->fw_ring_id = INVALID_HW_RING_ID;
5629 }
5630 }
5631 ring = &cpr->cp_ring_struct;
5632 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5633 hwrm_ring_free_send_msg(bp, ring, type,
5634 INVALID_HW_RING_ID);
5635 ring->fw_ring_id = INVALID_HW_RING_ID;
5636 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5637 }
5638 }
5639 }
5640
5641 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5642 bool shared);
5643
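/* Query firmware for the resources currently reserved for this
 * function and cache them in bp->hw_resc. On P5 chips, the reserved
 * RX/TX rings are trimmed if fewer completion rings were reserved.
 */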
5644 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5645 {
5646 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5647 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5648 struct hwrm_func_qcfg_input req = {0};
5649 int rc;
5650
5651 if (bp->hwrm_spec_code < 0x10601)
5652 return 0;
5653
5654 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5655 req.fid = cpu_to_le16(0xffff);
5656 mutex_lock(&bp->hwrm_cmd_lock);
5657 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5658 if (rc) {
5659 mutex_unlock(&bp->hwrm_cmd_lock);
5660 return rc;
5661 }
5662
5663 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5664 if (BNXT_NEW_RM(bp)) {
5665 u16 cp, stats;
5666
5667 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5668 hw_resc->resv_hw_ring_grps =
5669 le32_to_cpu(resp->alloc_hw_ring_grps);
5670 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5671 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5672 stats = le16_to_cpu(resp->alloc_stat_ctx);
5673 hw_resc->resv_irqs = cp;
5674 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5675 int rx = hw_resc->resv_rx_rings;
5676 int tx = hw_resc->resv_tx_rings;
5677
5678 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5679 rx >>= 1;
5680 if (cp < (rx + tx)) {
5681 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5682 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5683 rx <<= 1;
5684 hw_resc->resv_rx_rings = rx;
5685 hw_resc->resv_tx_rings = tx;
5686 }
5687 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5688 hw_resc->resv_hw_ring_grps = rx;
5689 }
5690 hw_resc->resv_cp_rings = cp;
5691 hw_resc->resv_stat_ctxs = stats;
5692 }
5693 mutex_unlock(&bp->hwrm_cmd_lock);
5694 return 0;
5695 }
5696
5697 /* Caller must hold bp->hwrm_cmd_lock */
5698 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5699 {
5700 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5701 struct hwrm_func_qcfg_input req = {0};
5702 int rc;
5703
5704 if (bp->hwrm_spec_code < 0x10601)
5705 return 0;
5706
5707 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5708 req.fid = cpu_to_le16(fid);
5709 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5710 if (!rc)
5711 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5712
5713 return rc;
5714 }
5715
5716 static bool bnxt_rfs_supported(struct bnxt *bp);
5717
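/* Build a FUNC_CFG request that reserves the given resources.  Each nonzero
 * count sets the matching bit in req->enables, so the firmware only touches
 * the fields actually being configured.  On P5 chips completion rings are
 * sized as tx_rings + ring_grps (one per tx ring and one per rx group),
 * MSI-X stands in for cp_rings, and one RSS context covers up to 64 ring
 * groups.
 *
 * Example with hypothetical counts: on a P5 PF with 8 tx and 8 rx rings,
 *
 *	__bnxt_hwrm_reserve_pf_rings(bp, &req, 8, 8, 8, 16, 16, 1);
 *
 * yields num_cmpl_rings = 8 + 8 = 16 and num_rsscos_ctxs =
 * DIV_ROUND_UP(8, 64) = 1.
 */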
5718 static void
5719 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5720 int tx_rings, int rx_rings, int ring_grps,
5721 int cp_rings, int stats, int vnics)
5722 {
5723 u32 enables = 0;
5724
5725 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5726 req->fid = cpu_to_le16(0xffff);
5727 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5728 req->num_tx_rings = cpu_to_le16(tx_rings);
5729 if (BNXT_NEW_RM(bp)) {
5730 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5731 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5732 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5733 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5734 enables |= tx_rings + ring_grps ?
5735 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5736 enables |= rx_rings ?
5737 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5738 } else {
5739 enables |= cp_rings ?
5740 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5741 enables |= ring_grps ?
5742 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5743 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5744 }
5745 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
5746
5747 req->num_rx_rings = cpu_to_le16(rx_rings);
5748 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5749 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5750 req->num_msix = cpu_to_le16(cp_rings);
5751 req->num_rsscos_ctxs =
5752 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5753 } else {
5754 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5755 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5756 req->num_rsscos_ctxs = cpu_to_le16(1);
5757 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5758 bnxt_rfs_supported(bp))
5759 req->num_rsscos_ctxs =
5760 cpu_to_le16(ring_grps + 1);
5761 }
5762 req->num_stat_ctxs = cpu_to_le16(stats);
5763 req->num_vnics = cpu_to_le16(vnics);
5764 }
5765 req->enables = cpu_to_le32(enables);
5766 }
5767
5768 static void
5769 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5770 struct hwrm_func_vf_cfg_input *req, int tx_rings,
5771 int rx_rings, int ring_grps, int cp_rings,
5772 int stats, int vnics)
5773 {
5774 u32 enables = 0;
5775
5776 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5777 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5778 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5779 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5780 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5781 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5782 enables |= tx_rings + ring_grps ?
5783 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5784 } else {
5785 enables |= cp_rings ?
5786 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5787 enables |= ring_grps ?
5788 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5789 }
5790 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
5791 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
5792
5793 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
5794 req->num_tx_rings = cpu_to_le16(tx_rings);
5795 req->num_rx_rings = cpu_to_le16(rx_rings);
5796 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5797 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5798 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5799 } else {
5800 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5801 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5802 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5803 }
5804 req->num_stat_ctxs = cpu_to_le16(stats);
5805 req->num_vnics = cpu_to_le16(vnics);
5806
5807 req->enables = cpu_to_le32(enables);
5808 }
5809
5810 static int
5811 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5812 int ring_grps, int cp_rings, int stats, int vnics)
5813 {
5814 struct hwrm_func_cfg_input req = {0};
5815 int rc;
5816
5817 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5818 cp_rings, stats, vnics);
5819 if (!req.enables)
5820 return 0;
5821
5822 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5823 if (rc)
5824 return rc;
5825
5826 if (bp->hwrm_spec_code < 0x10601)
5827 bp->hw_resc.resv_tx_rings = tx_rings;
5828
5829 rc = bnxt_hwrm_get_rings(bp);
5830 return rc;
5831 }
5832
5833 static int
5834 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5835 int ring_grps, int cp_rings, int stats, int vnics)
5836 {
5837 struct hwrm_func_vf_cfg_input req = {0};
5838 int rc;
5839
5840 if (!BNXT_NEW_RM(bp)) {
5841 bp->hw_resc.resv_tx_rings = tx_rings;
5842 return 0;
5843 }
5844
5845 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5846 cp_rings, stats, vnics);
5847 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5848 if (rc)
5849 return rc;
5850
5851 rc = bnxt_hwrm_get_rings(bp);
5852 return rc;
5853 }
5854
5855 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
5856 int cp, int stat, int vnic)
5857 {
5858 if (BNXT_PF(bp))
5859 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5860 vnic);
5861 else
5862 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5863 vnic);
5864 }
5865
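/* Notification queues in use: the driver's own cp_nr_rings plus any MSI-X
 * vectors granted to the ULP (RDMA) driver.  If the ULP's vector range
 * ends beyond that sum, the end of the range is used instead so that every
 * in-use vector is counted.
 */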
5866 int bnxt_nq_rings_in_use(struct bnxt *bp)
5867 {
5868 int cp = bp->cp_nr_rings;
5869 int ulp_msix, ulp_base;
5870
5871 ulp_msix = bnxt_get_ulp_msix_num(bp);
5872 if (ulp_msix) {
5873 ulp_base = bnxt_get_ulp_msix_base(bp);
5874 cp += ulp_msix;
5875 if ((ulp_base + ulp_msix) > cp)
5876 cp = ulp_base + ulp_msix;
5877 }
5878 return cp;
5879 }
5880
5881 static int bnxt_cp_rings_in_use(struct bnxt *bp)
5882 {
5883 int cp;
5884
5885 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5886 return bnxt_nq_rings_in_use(bp);
5887
5888 cp = bp->tx_nr_rings + bp->rx_nr_rings;
5889 return cp;
5890 }
5891
5892 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5893 {
5894 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
5895 int cp = bp->cp_nr_rings;
5896
5897 if (!ulp_stat)
5898 return cp;
5899
5900 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
5901 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
5902
5903 return cp + ulp_stat;
5904 }
5905
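/* Return true if the firmware-reserved resources no longer match what the
 * current configuration needs.  Note that RFS on non-P5 chips needs one
 * VNIC per rx ring plus the default VNIC (vnic = rx + 1), and aggregation
 * rings double the rx count being compared against the reservations.
 */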
5906 static bool bnxt_need_reserve_rings(struct bnxt *bp)
5907 {
5908 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5909 int cp = bnxt_cp_rings_in_use(bp);
5910 int nq = bnxt_nq_rings_in_use(bp);
5911 int rx = bp->rx_nr_rings, stat;
5912 int vnic = 1, grp = rx;
5913
5914 if (bp->hwrm_spec_code < 0x10601)
5915 return false;
5916
5917 if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5918 return true;
5919
5920 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5921 vnic = rx + 1;
5922 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5923 rx <<= 1;
5924 stat = bnxt_get_func_stat_ctxs(bp);
5925 if (BNXT_NEW_RM(bp) &&
5926 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
5927 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
5928 (hw_resc->resv_hw_ring_grps != grp &&
5929 !(bp->flags & BNXT_FLAG_CHIP_P5))))
5930 return true;
5931 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
5932 hw_resc->resv_irqs != nq)
5933 return true;
5934 return false;
5935 }
5936
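/* Reserve rings with the firmware and then adopt whatever was actually
 * granted: rx/tx/cp are trimmed to the granted completion rings and stat
 * contexts.  If fewer than two rx rings were granted while the device is
 * down, aggregation rings and LRO are turned off so a single rx ring fits;
 * if the device is running, the shortfall fails with -ENOMEM instead.
 * Any required resource coming back as zero is also -ENOMEM.
 */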
5937 static int __bnxt_reserve_rings(struct bnxt *bp)
5938 {
5939 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5940 int cp = bnxt_nq_rings_in_use(bp);
5941 int tx = bp->tx_nr_rings;
5942 int rx = bp->rx_nr_rings;
5943 int grp, rx_rings, rc;
5944 int vnic = 1, stat;
5945 bool sh = false;
5946
5947 if (!bnxt_need_reserve_rings(bp))
5948 return 0;
5949
5950 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5951 sh = true;
5952 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5953 vnic = rx + 1;
5954 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5955 rx <<= 1;
5956 grp = bp->rx_nr_rings;
5957 stat = bnxt_get_func_stat_ctxs(bp);
5958
5959 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
5960 if (rc)
5961 return rc;
5962
5963 tx = hw_resc->resv_tx_rings;
5964 if (BNXT_NEW_RM(bp)) {
5965 rx = hw_resc->resv_rx_rings;
5966 cp = hw_resc->resv_irqs;
5967 grp = hw_resc->resv_hw_ring_grps;
5968 vnic = hw_resc->resv_vnics;
5969 stat = hw_resc->resv_stat_ctxs;
5970 }
5971
5972 rx_rings = rx;
5973 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5974 if (rx >= 2) {
5975 rx_rings = rx >> 1;
5976 } else {
5977 if (netif_running(bp->dev))
5978 return -ENOMEM;
5979
5980 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
5981 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
5982 bp->dev->hw_features &= ~NETIF_F_LRO;
5983 bp->dev->features &= ~NETIF_F_LRO;
5984 bnxt_set_ring_params(bp);
5985 }
5986 }
5987 rx_rings = min_t(int, rx_rings, grp);
5988 cp = min_t(int, cp, bp->cp_nr_rings);
5989 if (stat > bnxt_get_ulp_stat_ctxs(bp))
5990 stat -= bnxt_get_ulp_stat_ctxs(bp);
5991 cp = min_t(int, cp, stat);
5992 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
5993 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5994 rx = rx_rings << 1;
5995 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
5996 bp->tx_nr_rings = tx;
5997 bp->rx_nr_rings = rx_rings;
5998 bp->cp_nr_rings = cp;
5999
6000 if (!tx || !rx || !cp || !grp || !vnic || !stat)
6001 return -ENOMEM;
6002
6003 return rc;
6004 }
6005
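/* The *_ASSETS_TEST flags ask the firmware to only check whether the
 * resources in the request could be reserved, without committing them.
 * The message is sent silently because failure here is an expected,
 * recoverable outcome.
 */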
6006 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6007 int ring_grps, int cp_rings, int stats,
6008 int vnics)
6009 {
6010 struct hwrm_func_vf_cfg_input req = {0};
6011 u32 flags;
6012 int rc;
6013
6014 if (!BNXT_NEW_RM(bp))
6015 return 0;
6016
6017 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6018 cp_rings, stats, vnics);
6019 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6020 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6021 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6022 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6023 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6024 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6025 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6026 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6027
6028 req.flags = cpu_to_le32(flags);
6029 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6030 return rc;
6031 }
6032
6033 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6034 int ring_grps, int cp_rings, int stats,
6035 int vnics)
6036 {
6037 struct hwrm_func_cfg_input req = {0};
6038 u32 flags;
6039 int rc;
6040
6041 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6042 cp_rings, stats, vnics);
6043 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6044 if (BNXT_NEW_RM(bp)) {
6045 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6046 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6047 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6048 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6049 if (bp->flags & BNXT_FLAG_CHIP_P5)
6050 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6051 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6052 else
6053 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6054 }
6055
6056 req.flags = cpu_to_le32(flags);
6057 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6058 return rc;
6059 }
6060
6061 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6062 int ring_grps, int cp_rings, int stats,
6063 int vnics)
6064 {
6065 if (bp->hwrm_spec_code < 0x10801)
6066 return 0;
6067
6068 if (BNXT_PF(bp))
6069 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6070 ring_grps, cp_rings, stats,
6071 vnics);
6072
6073 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6074 cp_rings, stats, vnics);
6075 }
6076
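/* Seed the interrupt coalescing capabilities with conservative legacy
 * defaults, then let RING_AGGINT_QCAPS override them when the firmware is
 * new enough (HWRM 1.9.2+, i.e. spec code 0x10902).
 */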
6077 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6078 {
6079 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6080 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6081 struct hwrm_ring_aggint_qcaps_input req = {0};
6082 int rc;
6083
6084 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6085 coal_cap->num_cmpl_dma_aggr_max = 63;
6086 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6087 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6088 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6089 coal_cap->int_lat_tmr_min_max = 65535;
6090 coal_cap->int_lat_tmr_max_max = 65535;
6091 coal_cap->num_cmpl_aggr_int_max = 65535;
6092 coal_cap->timer_units = 80;
6093
6094 if (bp->hwrm_spec_code < 0x10902)
6095 return;
6096
6097 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6098 mutex_lock(&bp->hwrm_cmd_lock);
6099 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6100 if (!rc) {
6101 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6102 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6103 coal_cap->num_cmpl_dma_aggr_max =
6104 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6105 coal_cap->num_cmpl_dma_aggr_during_int_max =
6106 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6107 coal_cap->cmpl_aggr_dma_tmr_max =
6108 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6109 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6110 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6111 coal_cap->int_lat_tmr_min_max =
6112 le16_to_cpu(resp->int_lat_tmr_min_max);
6113 coal_cap->int_lat_tmr_max_max =
6114 le16_to_cpu(resp->int_lat_tmr_max_max);
6115 coal_cap->num_cmpl_aggr_int_max =
6116 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6117 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6118 }
6119 mutex_unlock(&bp->hwrm_cmd_lock);
6120 }
6121
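/* Convert microseconds to hardware timer ticks.  timer_units is the tick
 * period in nanoseconds, so with the default of 80 set above, an example
 * target of 25 usec becomes 25 * 1000 / 80 = 312 ticks (integer division).
 */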
6122 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6123 {
6124 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6125
6126 return usec * 1000 / coal_cap->timer_units;
6127 }
6128
6129 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6130 struct bnxt_coal *hw_coal,
6131 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6132 {
6133 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6134 u32 cmpl_params = coal_cap->cmpl_params;
6135 u16 val, tmr, max, flags = 0;
6136
6137 max = hw_coal->bufs_per_record * 128;
6138 if (hw_coal->budget)
6139 max = hw_coal->bufs_per_record * hw_coal->budget;
6140 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6141
6142 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6143 req->num_cmpl_aggr_int = cpu_to_le16(val);
6144
6145 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6146 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6147
6148 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6149 coal_cap->num_cmpl_dma_aggr_during_int_max);
6150 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6151
6152 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6153 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6154 req->int_lat_tmr_max = cpu_to_le16(tmr);
6155
6156 /* min timer set to 1/2 of interrupt timer */
6157 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6158 val = tmr / 2;
6159 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6160 req->int_lat_tmr_min = cpu_to_le16(val);
6161 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6162 }
6163
6164 /* buf timer set to 1/4 of interrupt timer */
6165 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6166 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6167
6168 if (cmpl_params &
6169 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6170 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6171 val = clamp_t(u16, tmr, 1,
6172 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6173 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6174 req->enables |=
6175 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6176 }
6177
6178 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6179 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6180 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6181 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6182 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6183 req->flags = cpu_to_le16(flags);
6184 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6185 }
6186
6187 /* Caller holds bp->hwrm_cmd_lock */
6188 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6189 struct bnxt_coal *hw_coal)
6190 {
6191 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6192 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6193 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6194 u32 nq_params = coal_cap->nq_params;
6195 u16 tmr;
6196
6197 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6198 return 0;
6199
6200 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6201 -1, -1);
6202 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6203 req.flags =
6204 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6205
6206 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6207 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6208 req.int_lat_tmr_min = cpu_to_le16(tmr);
6209 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6210 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6211 }
6212
6213 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6214 {
6215 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6216 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6217 struct bnxt_coal coal;
6218
6219 /* Tick values are in microseconds.
6220 * 1 coal_buf x bufs_per_record = 1 completion record.
6221 */
6222 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6223
6224 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6225 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6226
6227 if (!bnapi->rx_ring)
6228 return -ENODEV;
6229
6230 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6231 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6232
6233 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6234
6235 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6236
6237 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6238 HWRM_CMD_TIMEOUT);
6239 }
6240
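/* Program coalescing for every ring.  One rx and one tx request are built
 * up front and only ring_id is patched per iteration.  On P5 chips a bnapi
 * that owns both an rx and a tx completion ring sends both requests, and
 * the NQ's min interrupt latency timer is programmed as well.
 */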
6241 int bnxt_hwrm_set_coal(struct bnxt *bp)
6242 {
6243 int i, rc = 0;
6244 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6245 req_tx = {0}, *req;
6246
6247 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6248 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6249 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6250 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6251
6252 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6253 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6254
6255 mutex_lock(&bp->hwrm_cmd_lock);
6256 for (i = 0; i < bp->cp_nr_rings; i++) {
6257 struct bnxt_napi *bnapi = bp->bnapi[i];
6258 struct bnxt_coal *hw_coal;
6259 u16 ring_id;
6260
6261 req = &req_rx;
6262 if (!bnapi->rx_ring) {
6263 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6264 req = &req_tx;
6265 } else {
6266 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6267 }
6268 req->ring_id = cpu_to_le16(ring_id);
6269
6270 rc = _hwrm_send_message(bp, req, sizeof(*req),
6271 HWRM_CMD_TIMEOUT);
6272 if (rc)
6273 break;
6274
6275 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6276 continue;
6277
6278 if (bnapi->rx_ring && bnapi->tx_ring) {
6279 req = &req_tx;
6280 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6281 req->ring_id = cpu_to_le16(ring_id);
6282 rc = _hwrm_send_message(bp, req, sizeof(*req),
6283 HWRM_CMD_TIMEOUT);
6284 if (rc)
6285 break;
6286 }
6287 if (bnapi->rx_ring)
6288 hw_coal = &bp->rx_coal;
6289 else
6290 hw_coal = &bp->tx_coal;
6291 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6292 }
6293 mutex_unlock(&bp->hwrm_cmd_lock);
6294 return rc;
6295 }
6296
6297 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6298 {
6299 int rc = 0, i;
6300 struct hwrm_stat_ctx_free_input req = {0};
6301
6302 if (!bp->bnapi)
6303 return 0;
6304
6305 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6306 return 0;
6307
6308 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6309
6310 mutex_lock(&bp->hwrm_cmd_lock);
6311 for (i = 0; i < bp->cp_nr_rings; i++) {
6312 struct bnxt_napi *bnapi = bp->bnapi[i];
6313 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6314
6315 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6316 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6317
6318 rc = _hwrm_send_message(bp, &req, sizeof(req),
6319 HWRM_CMD_TIMEOUT);
6320
6321 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6322 }
6323 }
6324 mutex_unlock(&bp->hwrm_cmd_lock);
6325 return rc;
6326 }
6327
6328 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6329 {
6330 int rc = 0, i;
6331 struct hwrm_stat_ctx_alloc_input req = {0};
6332 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6333
6334 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6335 return 0;
6336
6337 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6338
6339 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6340 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6341
6342 mutex_lock(&bp->hwrm_cmd_lock);
6343 for (i = 0; i < bp->cp_nr_rings; i++) {
6344 struct bnxt_napi *bnapi = bp->bnapi[i];
6345 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6346
6347 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
6348
6349 rc = _hwrm_send_message(bp, &req, sizeof(req),
6350 HWRM_CMD_TIMEOUT);
6351 if (rc)
6352 break;
6353
6354 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6355
6356 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6357 }
6358 mutex_unlock(&bp->hwrm_cmd_lock);
6359 return rc;
6360 }
6361
6362 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6363 {
6364 struct hwrm_func_qcfg_input req = {0};
6365 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6366 u16 flags;
6367 int rc;
6368
6369 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6370 req.fid = cpu_to_le16(0xffff);
6371 mutex_lock(&bp->hwrm_cmd_lock);
6372 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6373 if (rc)
6374 goto func_qcfg_exit;
6375
6376 #ifdef CONFIG_BNXT_SRIOV
6377 if (BNXT_VF(bp)) {
6378 struct bnxt_vf_info *vf = &bp->vf;
6379
6380 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6381 } else {
6382 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6383 }
6384 #endif
6385 flags = le16_to_cpu(resp->flags);
6386 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6387 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6388 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6389 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6390 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6391 }
6392 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6393 bp->flags |= BNXT_FLAG_MULTI_HOST;
6394
6395 switch (resp->port_partition_type) {
6396 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6397 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6398 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6399 bp->port_partition_type = resp->port_partition_type;
6400 break;
6401 }
6402 if (bp->hwrm_spec_code < 0x10707 ||
6403 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6404 bp->br_mode = BRIDGE_MODE_VEB;
6405 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6406 bp->br_mode = BRIDGE_MODE_VEPA;
6407 else
6408 bp->br_mode = BRIDGE_MODE_UNDEF;
6409
6410 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6411 if (!bp->max_mtu)
6412 bp->max_mtu = BNXT_MAX_MTU;
6413
6414 func_qcfg_exit:
6415 mutex_unlock(&bp->hwrm_cmd_lock);
6416 return rc;
6417 }
6418
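/* Query how much context ("backing store") memory the chip needs the host
 * to provide.  bp->ctx is allocated on first use, with one TQM region per
 * hardware queue plus one extra (bp->max_q + 1); the first of those maps
 * to the tqm_sp fields of the backing-store config request below.
 */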
6419 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6420 {
6421 struct hwrm_func_backing_store_qcaps_input req = {0};
6422 struct hwrm_func_backing_store_qcaps_output *resp =
6423 bp->hwrm_cmd_resp_addr;
6424 int rc;
6425
6426 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6427 return 0;
6428
6429 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6430 mutex_lock(&bp->hwrm_cmd_lock);
6431 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6432 if (!rc) {
6433 struct bnxt_ctx_pg_info *ctx_pg;
6434 struct bnxt_ctx_mem_info *ctx;
6435 int i;
6436
6437 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6438 if (!ctx) {
6439 rc = -ENOMEM;
6440 goto ctx_err;
6441 }
6442 ctx_pg = kcalloc(bp->max_q + 1, sizeof(*ctx_pg), GFP_KERNEL);
6443 if (!ctx_pg) {
6444 kfree(ctx);
6445 rc = -ENOMEM;
6446 goto ctx_err;
6447 }
6448 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6449 ctx->tqm_mem[i] = ctx_pg;
6450
6451 bp->ctx = ctx;
6452 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6453 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6454 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6455 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6456 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6457 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6458 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6459 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6460 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6461 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6462 ctx->vnic_max_vnic_entries =
6463 le16_to_cpu(resp->vnic_max_vnic_entries);
6464 ctx->vnic_max_ring_table_entries =
6465 le16_to_cpu(resp->vnic_max_ring_table_entries);
6466 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6467 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6468 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6469 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6470 ctx->tqm_min_entries_per_ring =
6471 le32_to_cpu(resp->tqm_min_entries_per_ring);
6472 ctx->tqm_max_entries_per_ring =
6473 le32_to_cpu(resp->tqm_max_entries_per_ring);
6474 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6475 if (!ctx->tqm_entries_multiple)
6476 ctx->tqm_entries_multiple = 1;
6477 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6478 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6479 ctx->mrav_num_entries_units =
6480 le16_to_cpu(resp->mrav_num_entries_units);
6481 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6482 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6483 } else {
6484 rc = 0;
6485 }
6486 ctx_err:
6487 mutex_unlock(&bp->hwrm_cmd_lock);
6488 return rc;
6489 }
6490
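/* Encode a backing-store region's page attributes for the firmware.  The
 * upper bits of *pg_attr carry the page-size code (0 = 4K, 1 << 4 = 8K for
 * BNXT_PAGE_SHIFT 13, 2 << 4 = 64K for BNXT_PAGE_SHIFT 16) and the low
 * bits carry the indirection level (0 = single data page, 1 or 2 = levels
 * of page tables).  *pg_dir gets the DMA address of the top-level table,
 * or of the lone data page when there is no indirection.
 */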
6491 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6492 __le64 *pg_dir)
6493 {
6494 u8 pg_size = 0;
6495
6496 if (BNXT_PAGE_SHIFT == 13)
6497 pg_size = 1 << 4;
6498 else if (BNXT_PAGE_SHIFT == 16)
6499 pg_size = 2 << 4;
6500
6501 *pg_attr = pg_size;
6502 if (rmem->depth >= 1) {
6503 if (rmem->depth == 2)
6504 *pg_attr |= 2;
6505 else
6506 *pg_attr |= 1;
6507 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6508 } else {
6509 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6510 }
6511 }
6512
6513 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6514 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6515 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6516 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6517 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6518 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6519
6520 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6521 {
6522 struct hwrm_func_backing_store_cfg_input req = {0};
6523 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6524 struct bnxt_ctx_pg_info *ctx_pg;
6525 __le32 *num_entries;
6526 __le64 *pg_dir;
6527 u32 flags = 0;
6528 u8 *pg_attr;
6529 int i, rc;
6530 u32 ena;
6531
6532 if (!ctx)
6533 return 0;
6534
6535 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6536 req.enables = cpu_to_le32(enables);
6537
6538 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6539 ctx_pg = &ctx->qp_mem;
6540 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6541 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6542 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6543 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6544 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6545 &req.qpc_pg_size_qpc_lvl,
6546 &req.qpc_page_dir);
6547 }
6548 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6549 ctx_pg = &ctx->srq_mem;
6550 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6551 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6552 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6553 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6554 &req.srq_pg_size_srq_lvl,
6555 &req.srq_page_dir);
6556 }
6557 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6558 ctx_pg = &ctx->cq_mem;
6559 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6560 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6561 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6562 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6563 &req.cq_page_dir);
6564 }
6565 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6566 ctx_pg = &ctx->vnic_mem;
6567 req.vnic_num_vnic_entries =
6568 cpu_to_le16(ctx->vnic_max_vnic_entries);
6569 req.vnic_num_ring_table_entries =
6570 cpu_to_le16(ctx->vnic_max_ring_table_entries);
6571 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6572 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6573 &req.vnic_pg_size_vnic_lvl,
6574 &req.vnic_page_dir);
6575 }
6576 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6577 ctx_pg = &ctx->stat_mem;
6578 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6579 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6580 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6581 &req.stat_pg_size_stat_lvl,
6582 &req.stat_page_dir);
6583 }
6584 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6585 ctx_pg = &ctx->mrav_mem;
6586 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6587 if (ctx->mrav_num_entries_units)
6588 flags |=
6589 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
6590 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6591 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6592 &req.mrav_pg_size_mrav_lvl,
6593 &req.mrav_page_dir);
6594 }
6595 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6596 ctx_pg = &ctx->tim_mem;
6597 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6598 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6599 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6600 &req.tim_pg_size_tim_lvl,
6601 &req.tim_page_dir);
6602 }
6603 for (i = 0, num_entries = &req.tqm_sp_num_entries,
6604 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6605 pg_dir = &req.tqm_sp_page_dir,
6606 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6607 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6608 if (!(enables & ena))
6609 continue;
6610
6611 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6612 ctx_pg = ctx->tqm_mem[i];
6613 *num_entries = cpu_to_le32(ctx_pg->entries);
6614 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6615 }
6616 req.flags = cpu_to_le32(flags);
6617 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6618 return rc;
6619 }
6620
6621 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6622 struct bnxt_ctx_pg_info *ctx_pg)
6623 {
6624 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6625
6626 rmem->page_size = BNXT_PAGE_SIZE;
6627 rmem->pg_arr = ctx_pg->ctx_pg_arr;
6628 rmem->dma_arr = ctx_pg->ctx_dma_arr;
6629 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6630 if (rmem->depth >= 1)
6631 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6632 return bnxt_alloc_ring(bp, rmem);
6633 }
6634
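/* Allocate a backing-store region of mem_size bytes as BNXT_PAGE_SIZE
 * pages.  Up to MAX_CTX_PAGES pages are mapped through a single-level page
 * table; anything larger (or depth > 1) gets a two-level layout in which
 * each entry of the top-level table points at a page table covering
 * another MAX_CTX_PAGES data pages, with only the last table possibly
 * partial.
 */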
6635 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6636 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6637 u8 depth)
6638 {
6639 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6640 int rc;
6641
6642 if (!mem_size)
6643 return 0;
6644
6645 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6646 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6647 ctx_pg->nr_pages = 0;
6648 return -EINVAL;
6649 }
6650 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6651 int nr_tbls, i;
6652
6653 rmem->depth = 2;
6654 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6655 GFP_KERNEL);
6656 if (!ctx_pg->ctx_pg_tbl)
6657 return -ENOMEM;
6658 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6659 rmem->nr_pages = nr_tbls;
6660 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6661 if (rc)
6662 return rc;
6663 for (i = 0; i < nr_tbls; i++) {
6664 struct bnxt_ctx_pg_info *pg_tbl;
6665
6666 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6667 if (!pg_tbl)
6668 return -ENOMEM;
6669 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6670 rmem = &pg_tbl->ring_mem;
6671 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6672 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6673 rmem->depth = 1;
6674 rmem->nr_pages = MAX_CTX_PAGES;
6675 if (i == (nr_tbls - 1)) {
6676 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6677
6678 if (rem)
6679 rmem->nr_pages = rem;
6680 }
6681 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6682 if (rc)
6683 break;
6684 }
6685 } else {
6686 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6687 if (rmem->nr_pages > 1 || depth)
6688 rmem->depth = 1;
6689 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6690 }
6691 return rc;
6692 }
6693
6694 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6695 struct bnxt_ctx_pg_info *ctx_pg)
6696 {
6697 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6698
6699 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6700 ctx_pg->ctx_pg_tbl) {
6701 int i, nr_tbls = rmem->nr_pages;
6702
6703 for (i = 0; i < nr_tbls; i++) {
6704 struct bnxt_ctx_pg_info *pg_tbl;
6705 struct bnxt_ring_mem_info *rmem2;
6706
6707 pg_tbl = ctx_pg->ctx_pg_tbl[i];
6708 if (!pg_tbl)
6709 continue;
6710 rmem2 = &pg_tbl->ring_mem;
6711 bnxt_free_ring(bp, rmem2);
6712 ctx_pg->ctx_pg_arr[i] = NULL;
6713 kfree(pg_tbl);
6714 ctx_pg->ctx_pg_tbl[i] = NULL;
6715 }
6716 kfree(ctx_pg->ctx_pg_tbl);
6717 ctx_pg->ctx_pg_tbl = NULL;
6718 }
6719 bnxt_free_ring(bp, rmem);
6720 ctx_pg->nr_pages = 0;
6721 }
6722
6723 static void bnxt_free_ctx_mem(struct bnxt *bp)
6724 {
6725 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6726 int i;
6727
6728 if (!ctx)
6729 return;
6730
6731 if (ctx->tqm_mem[0]) {
6732 for (i = 0; i < bp->max_q + 1; i++)
6733 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
6734 kfree(ctx->tqm_mem[0]);
6735 ctx->tqm_mem[0] = NULL;
6736 }
6737
6738 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6739 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
6740 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6741 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6742 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6743 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6744 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
6745 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6746 }
6747
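/* Size and allocate all backing-store regions, then hand them to the
 * firmware.  The QP/SRQ/CQ/VNIC/STAT regions are always configured; when
 * RoCE is supported (and this is not a kdump kernel), extra QP/SRQ entries
 * are added and the MRAV and TIM regions are enabled too.  Note the MRAV
 * entry count is split-encoded as (MRs << 16) | AHs when the firmware
 * reports mrav_num_entries_units.
 */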
6748 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6749 {
6750 struct bnxt_ctx_pg_info *ctx_pg;
6751 struct bnxt_ctx_mem_info *ctx;
6752 u32 mem_size, ena, entries;
6753 u32 num_mr, num_ah;
6754 u32 extra_srqs = 0;
6755 u32 extra_qps = 0;
6756 u8 pg_lvl = 1;
6757 int i, rc;
6758
6759 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6760 if (rc) {
6761 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6762 rc);
6763 return rc;
6764 }
6765 ctx = bp->ctx;
6766 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6767 return 0;
6768
6769 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
6770 pg_lvl = 2;
6771 extra_qps = 65536;
6772 extra_srqs = 8192;
6773 }
6774
6775 ctx_pg = &ctx->qp_mem;
6776 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6777 extra_qps;
6778 mem_size = ctx->qp_entry_size * ctx_pg->entries;
6779 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6780 if (rc)
6781 return rc;
6782
6783 ctx_pg = &ctx->srq_mem;
6784 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
6785 mem_size = ctx->srq_entry_size * ctx_pg->entries;
6786 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6787 if (rc)
6788 return rc;
6789
6790 ctx_pg = &ctx->cq_mem;
6791 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
6792 mem_size = ctx->cq_entry_size * ctx_pg->entries;
6793 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6794 if (rc)
6795 return rc;
6796
6797 ctx_pg = &ctx->vnic_mem;
6798 ctx_pg->entries = ctx->vnic_max_vnic_entries +
6799 ctx->vnic_max_ring_table_entries;
6800 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
6801 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6802 if (rc)
6803 return rc;
6804
6805 ctx_pg = &ctx->stat_mem;
6806 ctx_pg->entries = ctx->stat_max_entries;
6807 mem_size = ctx->stat_entry_size * ctx_pg->entries;
6808 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6809 if (rc)
6810 return rc;
6811
6812 ena = 0;
6813 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6814 goto skip_rdma;
6815
6816 ctx_pg = &ctx->mrav_mem;
6817 /* 128K extra is needed to accommodate static AH context
6818 * allocation by f/w.
6819 */
6820 num_mr = 1024 * 256;
6821 num_ah = 1024 * 128;
6822 ctx_pg->entries = num_mr + num_ah;
6823 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6824 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
6825 if (rc)
6826 return rc;
6827 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
6828 if (ctx->mrav_num_entries_units)
6829 ctx_pg->entries =
6830 ((num_mr / ctx->mrav_num_entries_units) << 16) |
6831 (num_ah / ctx->mrav_num_entries_units);
6832
6833 ctx_pg = &ctx->tim_mem;
6834 ctx_pg->entries = ctx->qp_mem.entries;
6835 mem_size = ctx->tim_entry_size * ctx_pg->entries;
6836 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6837 if (rc)
6838 return rc;
6839 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6840
6841 skip_rdma:
6842 entries = ctx->qp_max_l2_entries + extra_qps;
6843 entries = roundup(entries, ctx->tqm_entries_multiple);
6844 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6845 ctx->tqm_max_entries_per_ring);
6846 for (i = 0; i < bp->max_q + 1; i++) {
6847 ctx_pg = ctx->tqm_mem[i];
6848 ctx_pg->entries = entries;
6849 mem_size = ctx->tqm_entry_size * entries;
6850 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6851 if (rc)
6852 return rc;
6853 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
6854 }
6855 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6856 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6857 if (rc)
6858 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6859 rc);
6860 else
6861 ctx->flags |= BNXT_CTX_FLAG_INITED;
6862
6863 return 0;
6864 }
6865
6866 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
6867 {
6868 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6869 struct hwrm_func_resource_qcaps_input req = {0};
6870 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6871 int rc;
6872
6873 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6874 req.fid = cpu_to_le16(0xffff);
6875
6876 mutex_lock(&bp->hwrm_cmd_lock);
6877 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6878 HWRM_CMD_TIMEOUT);
6879 if (rc)
6880 goto hwrm_func_resc_qcaps_exit;
6881
6882 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6883 if (!all)
6884 goto hwrm_func_resc_qcaps_exit;
6885
6886 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6887 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6888 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6889 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6890 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6891 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6892 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6893 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6894 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6895 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6896 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6897 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6898 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6899 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6900 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6901 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6902
6903 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6904 u16 max_msix = le16_to_cpu(resp->max_msix);
6905
6906 hw_resc->max_nqs = max_msix;
6907 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6908 }
6909
6910 if (BNXT_PF(bp)) {
6911 struct bnxt_pf_info *pf = &bp->pf;
6912
6913 pf->vf_resv_strategy =
6914 le16_to_cpu(resp->vf_reservation_strategy);
6915 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
6916 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6917 }
6918 hwrm_func_resc_qcaps_exit:
6919 mutex_unlock(&bp->hwrm_cmd_lock);
6920 return rc;
6921 }
6922
6923 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
6924 {
6925 int rc = 0;
6926 struct hwrm_func_qcaps_input req = {0};
6927 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6928 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6929 u32 flags;
6930
6931 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6932 req.fid = cpu_to_le16(0xffff);
6933
6934 mutex_lock(&bp->hwrm_cmd_lock);
6935 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6936 if (rc)
6937 goto hwrm_func_qcaps_exit;
6938
6939 flags = le32_to_cpu(resp->flags);
6940 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
6941 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6942 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
6943 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6944 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
6945 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
6946 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
6947 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
6948 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
6949 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
6950 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
6951 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
6952
6953 bp->tx_push_thresh = 0;
6954 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
6955 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6956
6957 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6958 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6959 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6960 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6961 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6962 if (!hw_resc->max_hw_ring_grps)
6963 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6964 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6965 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6966 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6967
6968 if (BNXT_PF(bp)) {
6969 struct bnxt_pf_info *pf = &bp->pf;
6970
6971 pf->fw_fid = le16_to_cpu(resp->fid);
6972 pf->port_id = le16_to_cpu(resp->port_id);
6973 bp->dev->dev_port = pf->port_id;
6974 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
6975 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
6976 pf->max_vfs = le16_to_cpu(resp->max_vfs);
6977 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
6978 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
6979 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
6980 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
6981 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
6982 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
6983 bp->flags &= ~BNXT_FLAG_WOL_CAP;
6984 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
6985 bp->flags |= BNXT_FLAG_WOL_CAP;
6986 } else {
6987 #ifdef CONFIG_BNXT_SRIOV
6988 struct bnxt_vf_info *vf = &bp->vf;
6989
6990 vf->fw_fid = le16_to_cpu(resp->fid);
6991 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
6992 #endif
6993 }
6994
6995 hwrm_func_qcaps_exit:
6996 mutex_unlock(&bp->hwrm_cmd_lock);
6997 return rc;
6998 }
6999
7000 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7001
7002 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7003 {
7004 int rc;
7005
7006 rc = __bnxt_hwrm_func_qcaps(bp);
7007 if (rc)
7008 return rc;
7009 rc = bnxt_hwrm_queue_qportcfg(bp);
7010 if (rc) {
7011 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7012 return rc;
7013 }
7014 if (bp->hwrm_spec_code >= 0x10803) {
7015 rc = bnxt_alloc_ctx_mem(bp);
7016 if (rc)
7017 return rc;
7018 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7019 if (!rc)
7020 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7021 }
7022 return 0;
7023 }
7024
7025 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7026 {
7027 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7028 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7029 int rc = 0;
7030 u32 flags;
7031
7032 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7033 return 0;
7034
7035 resp = bp->hwrm_cmd_resp_addr;
7036 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7037
7038 mutex_lock(&bp->hwrm_cmd_lock);
7039 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7040 if (rc)
7041 goto hwrm_cfa_adv_qcaps_exit;
7042
7043 flags = le32_to_cpu(resp->flags);
7044 if (flags &
7045 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED)
7046 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX;
7047
7048 hwrm_cfa_adv_qcaps_exit:
7049 mutex_unlock(&bp->hwrm_cmd_lock);
7050 return rc;
7051 }
7052
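/* Firmware health registers may live in GRC space, which the host reaches
 * through a BAR0 window (window 3 here).  Only one window base is
 * programmed, so all monitored GRC registers must share the same
 * BNXT_GRC_BASE_MASK-aligned base; otherwise -ERANGE is returned.
 */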
7053 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7054 {
7055 struct bnxt_fw_health *fw_health = bp->fw_health;
7056 u32 reg_base = 0xffffffff;
7057 int i;
7058
7059 /* Only pre-map the monitoring GRC registers using window 3 */
7060 for (i = 0; i < 4; i++) {
7061 u32 reg = fw_health->regs[i];
7062
7063 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7064 continue;
7065 if (reg_base == 0xffffffff)
7066 reg_base = reg & BNXT_GRC_BASE_MASK;
7067 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7068 return -ERANGE;
7069 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
7070 (reg & BNXT_GRC_OFFSET_MASK);
7071 }
7072 if (reg_base == 0xffffffff)
7073 return 0;
7074
7075 writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7076 BNXT_FW_HEALTH_WIN_MAP_OFF);
7077 return 0;
7078 }
7079
7080 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7081 {
7082 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7083 struct bnxt_fw_health *fw_health = bp->fw_health;
7084 struct hwrm_error_recovery_qcfg_input req = {0};
7085 int rc, i;
7086
7087 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7088 return 0;
7089
7090 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7091 mutex_lock(&bp->hwrm_cmd_lock);
7092 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7093 if (rc)
7094 goto err_recovery_out;
7095 if (!fw_health) {
7096 fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL);
7097 bp->fw_health = fw_health;
7098 if (!fw_health) {
7099 rc = -ENOMEM;
7100 goto err_recovery_out;
7101 }
7102 }
7103 fw_health->flags = le32_to_cpu(resp->flags);
7104 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7105 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7106 rc = -EINVAL;
7107 goto err_recovery_out;
7108 }
7109 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7110 fw_health->master_func_wait_dsecs =
7111 le32_to_cpu(resp->master_func_wait_period);
7112 fw_health->normal_func_wait_dsecs =
7113 le32_to_cpu(resp->normal_func_wait_period);
7114 fw_health->post_reset_wait_dsecs =
7115 le32_to_cpu(resp->master_func_wait_period_after_reset);
7116 fw_health->post_reset_max_wait_dsecs =
7117 le32_to_cpu(resp->max_bailout_time_after_reset);
7118 fw_health->regs[BNXT_FW_HEALTH_REG] =
7119 le32_to_cpu(resp->fw_health_status_reg);
7120 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7121 le32_to_cpu(resp->fw_heartbeat_reg);
7122 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7123 le32_to_cpu(resp->fw_reset_cnt_reg);
7124 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7125 le32_to_cpu(resp->reset_inprogress_reg);
7126 fw_health->fw_reset_inprog_reg_mask =
7127 le32_to_cpu(resp->reset_inprogress_reg_mask);
7128 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7129 if (fw_health->fw_reset_seq_cnt >= 16) {
7130 rc = -EINVAL;
7131 goto err_recovery_out;
7132 }
7133 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7134 fw_health->fw_reset_seq_regs[i] =
7135 le32_to_cpu(resp->reset_reg[i]);
7136 fw_health->fw_reset_seq_vals[i] =
7137 le32_to_cpu(resp->reset_reg_val[i]);
7138 fw_health->fw_reset_seq_delay_msec[i] =
7139 resp->delay_after_reset[i];
7140 }
7141 err_recovery_out:
7142 mutex_unlock(&bp->hwrm_cmd_lock);
7143 if (!rc)
7144 rc = bnxt_map_fw_health_regs(bp);
7145 if (rc)
7146 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7147 return rc;
7148 }
7149
7150 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7151 {
7152 struct hwrm_func_reset_input req = {0};
7153
7154 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7155 req.enables = 0;
7156
7157 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7158 }
7159
7160 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7161 {
7162 int rc = 0;
7163 struct hwrm_queue_qportcfg_input req = {0};
7164 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7165 u8 i, j, *qptr;
7166 bool no_rdma;
7167
7168 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7169
7170 mutex_lock(&bp->hwrm_cmd_lock);
7171 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7172 if (rc)
7173 goto qportcfg_exit;
7174
7175 if (!resp->max_configurable_queues) {
7176 rc = -EINVAL;
7177 goto qportcfg_exit;
7178 }
7179 bp->max_tc = resp->max_configurable_queues;
7180 bp->max_lltc = resp->max_configurable_lossless_queues;
7181 if (bp->max_tc > BNXT_MAX_QUEUE)
7182 bp->max_tc = BNXT_MAX_QUEUE;
7183
7184 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7185 qptr = &resp->queue_id0;
7186 for (i = 0, j = 0; i < bp->max_tc; i++) {
7187 bp->q_info[j].queue_id = *qptr;
7188 bp->q_ids[i] = *qptr++;
7189 bp->q_info[j].queue_profile = *qptr++;
7190 bp->tc_to_qidx[j] = j;
7191 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7192 (no_rdma && BNXT_PF(bp)))
7193 j++;
7194 }
7195 bp->max_q = bp->max_tc;
7196 bp->max_tc = max_t(u8, j, 1);
7197
7198 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7199 bp->max_tc = 1;
7200
7201 if (bp->max_lltc > bp->max_tc)
7202 bp->max_lltc = bp->max_tc;
7203
7204 qportcfg_exit:
7205 mutex_unlock(&bp->hwrm_cmd_lock);
7206 return rc;
7207 }
7208
7209 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7210 {
7211 struct hwrm_ver_get_input req = {0};
7212 int rc;
7213
7214 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7215 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7216 req.hwrm_intf_min = HWRM_VERSION_MINOR;
7217 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7218
7219 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7220 silent);
7221 return rc;
7222 }
7223
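/* Cache the VER_GET response and pack bp->hwrm_spec_code as
 * (maj << 16) | (min << 8) | update, so HWRM spec 1.6.1 becomes 0x10601,
 * the value the resource-manager checks elsewhere in this file compare
 * against.
 */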
7224 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7225 {
7226 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7227 u32 dev_caps_cfg;
7228 int rc;
7229
7230 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7231 mutex_lock(&bp->hwrm_cmd_lock);
7232 rc = __bnxt_hwrm_ver_get(bp, false);
7233 if (rc)
7234 goto hwrm_ver_get_exit;
7235
7236 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7237
7238 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7239 resp->hwrm_intf_min_8b << 8 |
7240 resp->hwrm_intf_upd_8b;
7241 if (resp->hwrm_intf_maj_8b < 1) {
7242 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7243 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7244 resp->hwrm_intf_upd_8b);
7245 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7246 }
7247 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
7248 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
7249 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
7250
7251 if (strlen(resp->active_pkg_name)) {
7252 int fw_ver_len = strlen(bp->fw_ver_str);
7253
7254 snprintf(bp->fw_ver_str + fw_ver_len,
7255 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7256 resp->active_pkg_name);
7257 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7258 }
7259
7260 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7261 if (!bp->hwrm_cmd_timeout)
7262 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7263
7264 if (resp->hwrm_intf_maj_8b >= 1) {
7265 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7266 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7267 }
7268 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7269 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7270
7271 bp->chip_num = le16_to_cpu(resp->chip_num);
7272 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7273 !resp->chip_metal)
7274 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7275
7276 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7277 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7278 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7279 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7280
7281 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7282 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7283
7284 if (dev_caps_cfg &
7285 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7286 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7287
7288 if (dev_caps_cfg &
7289 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7290 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7291
7292 if (dev_caps_cfg &
7293 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7294 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7295
7296 hwrm_ver_get_exit:
7297 mutex_unlock(&bp->hwrm_cmd_lock);
7298 return rc;
7299 }
7300
7301 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7302 {
7303 struct hwrm_fw_set_time_input req = {0};
7304 struct tm tm;
7305 time64_t now = ktime_get_real_seconds();
7306
7307 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7308 bp->hwrm_spec_code < 0x10400)
7309 return -EOPNOTSUPP;
7310
7311 time64_to_tm(now, 0, &tm);
7312 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7313 req.year = cpu_to_le16(1900 + tm.tm_year);
7314 req.month = 1 + tm.tm_mon;
7315 req.day = tm.tm_mday;
7316 req.hour = tm.tm_hour;
7317 req.minute = tm.tm_min;
7318 req.second = tm.tm_sec;
7319 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7320 }
7321
7322 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
7323 {
7324 int rc;
7325 struct bnxt_pf_info *pf = &bp->pf;
7326 struct hwrm_port_qstats_input req = {0};
7327
7328 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7329 return 0;
7330
7331 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7332 req.port_id = cpu_to_le16(pf->port_id);
7333 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
7334 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
7335 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7336 return rc;
7337 }
7338
7339 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
7340 {
7341 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
7342 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
7343 struct hwrm_port_qstats_ext_input req = {0};
7344 struct bnxt_pf_info *pf = &bp->pf;
7345 u32 tx_stat_size;
7346 int rc;
7347
7348 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7349 return 0;
7350
7351 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7352 req.port_id = cpu_to_le16(pf->port_id);
7353 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7354 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
7355 tx_stat_size = bp->hw_tx_port_stats_ext ?
7356 sizeof(*bp->hw_tx_port_stats_ext) : 0;
7357 req.tx_stat_size = cpu_to_le16(tx_stat_size);
7358 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
7359 mutex_lock(&bp->hwrm_cmd_lock);
7360 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7361 if (!rc) {
7362 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
7363 bp->fw_tx_stats_ext_size = tx_stat_size ?
7364 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
7365 } else {
7366 bp->fw_rx_stats_ext_size = 0;
7367 bp->fw_tx_stats_ext_size = 0;
7368 }
7369 if (bp->fw_tx_stats_ext_size <=
7370 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7371 mutex_unlock(&bp->hwrm_cmd_lock);
7372 bp->pri2cos_valid = 0;
7373 return rc;
7374 }
7375
7376 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
7377 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
7378
7379 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
7380 if (!rc) {
7381 struct hwrm_queue_pri2cos_qcfg_output *resp2;
7382 u8 *pri2cos;
7383 int i, j;
7384
7385 resp2 = bp->hwrm_cmd_resp_addr;
7386 pri2cos = &resp2->pri0_cos_queue_id;
7387 for (i = 0; i < 8; i++) {
7388 u8 queue_id = pri2cos[i];
7389
7390 for (j = 0; j < bp->max_q; j++) {
7391 if (bp->q_ids[j] == queue_id)
7392 bp->pri2cos[i] = j;
7393 }
7394 }
7395 bp->pri2cos_valid = 1;
7396 }
7397 mutex_unlock(&bp->hwrm_cmd_lock);
7398 return rc;
7399 }
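
/* Notes on bnxt_hwrm_port_qstats_ext(): the extended stats blocks are
 * arrays of 64-bit counters, so the byte sizes returned by firmware are
 * divided by 8 to get counter counts. The PRI2COS query then maps each
 * of the 8 VLAN priorities to a driver queue index: e.g. if firmware
 * reports that priority 0 uses hardware CoS queue ID 4 and bp->q_ids[]
 * is {0, 2, 4, ...}, the loop sets bp->pri2cos[0] = 2.
 */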
7400
7401 static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
7402 {
7403 struct hwrm_pcie_qstats_input req = {0};
7404
7405 if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
7406 return 0;
7407
7408 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
7409 req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
7410 req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
7411 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7412 }
7413
7414 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
7415 {
7416 if (bp->vxlan_port_cnt) {
7417 bnxt_hwrm_tunnel_dst_port_free(
7418 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7419 }
7420 bp->vxlan_port_cnt = 0;
7421 if (bp->nge_port_cnt) {
7422 bnxt_hwrm_tunnel_dst_port_free(
7423 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7424 }
7425 bp->nge_port_cnt = 0;
7426 }
7427
7428 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
7429 {
7430 int rc, i;
7431 u32 tpa_flags = 0;
7432
7433 if (set_tpa)
7434 tpa_flags = bp->flags & BNXT_FLAG_TPA;
7435 else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
7436 return 0;
7437 for (i = 0; i < bp->nr_vnics; i++) {
7438 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
7439 if (rc) {
7440 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
7441 i, rc);
7442 return rc;
7443 }
7444 }
7445 return 0;
7446 }
7447
7448 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7449 {
7450 int i;
7451
7452 for (i = 0; i < bp->nr_vnics; i++)
7453 bnxt_hwrm_vnic_set_rss(bp, i, false);
7454 }
7455
7456 static void bnxt_clear_vnic(struct bnxt *bp)
7457 {
7458 if (!bp->vnic_info)
7459 return;
7460
7461 bnxt_hwrm_clear_vnic_filter(bp);
7462 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
7463 /* clear all RSS settings before freeing the vnic ctx */
7464 bnxt_hwrm_clear_vnic_rss(bp);
7465 bnxt_hwrm_vnic_ctx_free(bp);
7466 }
7467 /* before freeing the vnic, undo the vnic TPA settings */
7468 if (bp->flags & BNXT_FLAG_TPA)
7469 bnxt_set_tpa(bp, false);
7470 bnxt_hwrm_vnic_free(bp);
7471 if (bp->flags & BNXT_FLAG_CHIP_P5)
7472 bnxt_hwrm_vnic_ctx_free(bp);
7473 }
7474
7475 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7476 bool irq_re_init)
7477 {
7478 bnxt_clear_vnic(bp);
7479 bnxt_hwrm_ring_free(bp, close_path);
7480 bnxt_hwrm_ring_grp_free(bp);
7481 if (irq_re_init) {
7482 bnxt_hwrm_stat_ctx_free(bp);
7483 bnxt_hwrm_free_tunnel_ports(bp);
7484 }
7485 }
7486
7487 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
7488 {
7489 struct hwrm_func_cfg_input req = {0};
7490 int rc;
7491
7492 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7493 req.fid = cpu_to_le16(0xffff);
7494 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
7495 if (br_mode == BRIDGE_MODE_VEB)
7496 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
7497 else if (br_mode == BRIDGE_MODE_VEPA)
7498 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
7499 else
7500 return -EINVAL;
7501 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7502 return rc;
7503 }
7504
7505 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
7506 {
7507 struct hwrm_func_cfg_input req = {0};
7508 int rc;
7509
7510 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
7511 return 0;
7512
7513 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7514 req.fid = cpu_to_le16(0xffff);
7515 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
7516 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
7517 if (size == 128)
7518 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
7519
7520 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7521 return rc;
7522 }
7523
7524 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7525 {
7526 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
7527 int rc;
7528
7529 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
7530 goto skip_rss_ctx;
7531
7532 /* allocate context for vnic */
7533 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
7534 if (rc) {
7535 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7536 vnic_id, rc);
7537 goto vnic_setup_err;
7538 }
7539 bp->rsscos_nr_ctxs++;
7540
7541 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7542 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
7543 if (rc) {
7544 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
7545 vnic_id, rc);
7546 goto vnic_setup_err;
7547 }
7548 bp->rsscos_nr_ctxs++;
7549 }
7550
7551 skip_rss_ctx:
7552 /* configure default vnic, ring grp */
7553 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7554 if (rc) {
7555 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7556 vnic_id, rc);
7557 goto vnic_setup_err;
7558 }
7559
7560 /* Enable RSS hashing on vnic */
7561 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
7562 if (rc) {
7563 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
7564 vnic_id, rc);
7565 goto vnic_setup_err;
7566 }
7567
7568 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7569 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7570 if (rc) {
7571 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7572 vnic_id, rc);
7573 }
7574 }
7575
7576 vnic_setup_err:
7577 return rc;
7578 }
7579
7580 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
7581 {
7582 int rc, i, nr_ctxs;
7583
7584 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
7585 for (i = 0; i < nr_ctxs; i++) {
7586 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
7587 if (rc) {
7588 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
7589 vnic_id, i, rc);
7590 break;
7591 }
7592 bp->rsscos_nr_ctxs++;
7593 }
7594 if (i < nr_ctxs)
7595 return -ENOMEM;
7596
7597 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
7598 if (rc) {
7599 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
7600 vnic_id, rc);
7601 return rc;
7602 }
7603 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7604 if (rc) {
7605 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7606 vnic_id, rc);
7607 return rc;
7608 }
7609 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7610 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7611 if (rc) {
7612 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7613 vnic_id, rc);
7614 }
7615 }
7616 return rc;
7617 }
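
/* On P5 chips each RSS context covers up to 64 RX rings, hence the
 * DIV_ROUND_UP(bp->rx_nr_rings, 64) above: e.g. 72 RX rings need two
 * contexts. If any context allocation fails, the loop exits early and
 * the function returns -ENOMEM.
 */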
7618
7619 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7620 {
7621 if (bp->flags & BNXT_FLAG_CHIP_P5)
7622 return __bnxt_setup_vnic_p5(bp, vnic_id);
7623 else
7624 return __bnxt_setup_vnic(bp, vnic_id);
7625 }
7626
7627 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
7628 {
7629 #ifdef CONFIG_RFS_ACCEL
7630 int i, rc = 0;
7631
7632 if (bp->flags & BNXT_FLAG_CHIP_P5)
7633 return 0;
7634
7635 for (i = 0; i < bp->rx_nr_rings; i++) {
7636 struct bnxt_vnic_info *vnic;
7637 u16 vnic_id = i + 1;
7638 u16 ring_id = i;
7639
7640 if (vnic_id >= bp->nr_vnics)
7641 break;
7642
7643 vnic = &bp->vnic_info[vnic_id];
7644 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7645 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7646 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
7647 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
7648 if (rc) {
7649 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7650 vnic_id, rc);
7651 break;
7652 }
7653 rc = bnxt_setup_vnic(bp, vnic_id);
7654 if (rc)
7655 break;
7656 }
7657 return rc;
7658 #else
7659 return 0;
7660 #endif
7661 }
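
/* The vnic_id = i + 1 mapping above reserves vnic 0 as the default
 * vnic; each RX ring then gets its own RFS vnic so aRFS-steered flows
 * can be directed to a specific ring. The loop stops early if fewer
 * vnics than RX rings were reserved.
 */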
7662
7663 /* Allow PF and VF with default VLAN to be in promiscuous mode */
7664 static bool bnxt_promisc_ok(struct bnxt *bp)
7665 {
7666 #ifdef CONFIG_BNXT_SRIOV
7667 if (BNXT_VF(bp) && !bp->vf.vlan)
7668 return false;
7669 #endif
7670 return true;
7671 }
7672
7673 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7674 {
7675 int rc = 0;
7676
7677 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7678 if (rc) {
7679 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7680 rc);
7681 return rc;
7682 }
7683
7684 rc = bnxt_hwrm_vnic_cfg(bp, 1);
7685 if (rc) {
7686 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7687 rc);
7688 return rc;
7689 }
7690 return rc;
7691 }
7692
7693 static int bnxt_cfg_rx_mode(struct bnxt *);
7694 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
7695
7696 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7697 {
7698 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7699 int rc = 0;
7700 unsigned int rx_nr_rings = bp->rx_nr_rings;
7701
7702 if (irq_re_init) {
7703 rc = bnxt_hwrm_stat_ctx_alloc(bp);
7704 if (rc) {
7705 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7706 rc);
7707 goto err_out;
7708 }
7709 }
7710
7711 rc = bnxt_hwrm_ring_alloc(bp);
7712 if (rc) {
7713 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7714 goto err_out;
7715 }
7716
7717 rc = bnxt_hwrm_ring_grp_alloc(bp);
7718 if (rc) {
7719 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7720 goto err_out;
7721 }
7722
7723 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7724 rx_nr_rings--;
7725
7726 /* default vnic 0 */
7727 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
7728 if (rc) {
7729 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7730 goto err_out;
7731 }
7732
7733 rc = bnxt_setup_vnic(bp, 0);
7734 if (rc)
7735 goto err_out;
7736
7737 if (bp->flags & BNXT_FLAG_RFS) {
7738 rc = bnxt_alloc_rfs_vnics(bp);
7739 if (rc)
7740 goto err_out;
7741 }
7742
7743 if (bp->flags & BNXT_FLAG_TPA) {
7744 rc = bnxt_set_tpa(bp, true);
7745 if (rc)
7746 goto err_out;
7747 }
7748
7749 if (BNXT_VF(bp))
7750 bnxt_update_vf_mac(bp);
7751
7752 /* Filter for default vnic 0 */
7753 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7754 if (rc) {
7755 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7756 goto err_out;
7757 }
7758 vnic->uc_filter_count = 1;
7759
7760 vnic->rx_mask = 0;
7761 if (bp->dev->flags & IFF_BROADCAST)
7762 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7763
7764 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7765 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7766
7767 if (bp->dev->flags & IFF_ALLMULTI) {
7768 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7769 vnic->mc_list_count = 0;
7770 } else {
7771 u32 mask = 0;
7772
7773 bnxt_mc_list_updated(bp, &mask);
7774 vnic->rx_mask |= mask;
7775 }
7776
7777 rc = bnxt_cfg_rx_mode(bp);
7778 if (rc)
7779 goto err_out;
7780
7781 rc = bnxt_hwrm_set_coal(bp);
7782 if (rc)
7783 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
7784 rc);
7785
7786 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7787 rc = bnxt_setup_nitroa0_vnic(bp);
7788 if (rc)
7789 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7790 rc);
7791 }
7792
7793 if (BNXT_VF(bp)) {
7794 bnxt_hwrm_func_qcfg(bp);
7795 netdev_update_features(bp->dev);
7796 }
7797
7798 return 0;
7799
7800 err_out:
7801 bnxt_hwrm_resource_free(bp, 0, true);
7802
7803 return rc;
7804 }
7805
7806 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7807 {
7808 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7809 return 0;
7810 }
7811
7812 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7813 {
7814 bnxt_init_cp_rings(bp);
7815 bnxt_init_rx_rings(bp);
7816 bnxt_init_tx_rings(bp);
7817 bnxt_init_ring_grps(bp, irq_re_init);
7818 bnxt_init_vnics(bp);
7819
7820 return bnxt_init_chip(bp, irq_re_init);
7821 }
7822
7823 static int bnxt_set_real_num_queues(struct bnxt *bp)
7824 {
7825 int rc;
7826 struct net_device *dev = bp->dev;
7827
7828 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7829 bp->tx_nr_rings_xdp);
7830 if (rc)
7831 return rc;
7832
7833 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7834 if (rc)
7835 return rc;
7836
7837 #ifdef CONFIG_RFS_ACCEL
7838 if (bp->flags & BNXT_FLAG_RFS)
7839 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
7840 #endif
7841
7842 return rc;
7843 }
7844
7845 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7846 bool shared)
7847 {
7848 int _rx = *rx, _tx = *tx;
7849
7850 if (shared) {
7851 *rx = min_t(int, _rx, max);
7852 *tx = min_t(int, _tx, max);
7853 } else {
7854 if (max < 2)
7855 return -ENOMEM;
7856
7857 while (_rx + _tx > max) {
7858 if (_rx > _tx && _rx > 1)
7859 _rx--;
7860 else if (_tx > 1)
7861 _tx--;
7862 }
7863 *rx = _rx;
7864 *tx = _tx;
7865 }
7866 return 0;
7867 }
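
/* Worked example for bnxt_trim_rings(): with shared completion rings,
 * rx and tx are simply clamped to max. In the non-shared case the
 * larger of the two counts is decremented until rx + tx <= max, so
 * rx = 5, tx = 4, max = 6 trims to rx = 3, tx = 3.
 */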
7868
7869 static void bnxt_setup_msix(struct bnxt *bp)
7870 {
7871 const int len = sizeof(bp->irq_tbl[0].name);
7872 struct net_device *dev = bp->dev;
7873 int tcs, i;
7874
7875 tcs = netdev_get_num_tc(dev);
7876 if (tcs > 1) {
7877 int i, off, count;
7878
7879 for (i = 0; i < tcs; i++) {
7880 count = bp->tx_nr_rings_per_tc;
7881 off = i * count;
7882 netdev_set_tc_queue(dev, i, count, off);
7883 }
7884 }
7885
7886 for (i = 0; i < bp->cp_nr_rings; i++) {
7887 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7888 char *attr;
7889
7890 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7891 attr = "TxRx";
7892 else if (i < bp->rx_nr_rings)
7893 attr = "rx";
7894 else
7895 attr = "tx";
7896
7897 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7898 attr, i);
7899 bp->irq_tbl[map_idx].handler = bnxt_msix;
7900 }
7901 }
7902
7903 static void bnxt_setup_inta(struct bnxt *bp)
7904 {
7905 const int len = sizeof(bp->irq_tbl[0].name);
7906
7907 if (netdev_get_num_tc(bp->dev))
7908 netdev_reset_tc(bp->dev);
7909
7910 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7911 0);
7912 bp->irq_tbl[0].handler = bnxt_inta;
7913 }
7914
7915 static int bnxt_setup_int_mode(struct bnxt *bp)
7916 {
7917 int rc;
7918
7919 if (bp->flags & BNXT_FLAG_USING_MSIX)
7920 bnxt_setup_msix(bp);
7921 else
7922 bnxt_setup_inta(bp);
7923
7924 rc = bnxt_set_real_num_queues(bp);
7925 return rc;
7926 }
7927
7928 #ifdef CONFIG_RFS_ACCEL
7929 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7930 {
7931 return bp->hw_resc.max_rsscos_ctxs;
7932 }
7933
7934 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7935 {
7936 return bp->hw_resc.max_vnics;
7937 }
7938 #endif
7939
7940 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7941 {
7942 return bp->hw_resc.max_stat_ctxs;
7943 }
7944
7945 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7946 {
7947 return bp->hw_resc.max_cp_rings;
7948 }
7949
7950 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
7951 {
7952 unsigned int cp = bp->hw_resc.max_cp_rings;
7953
7954 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7955 cp -= bnxt_get_ulp_msix_num(bp);
7956
7957 return cp;
7958 }
7959
7960 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7961 {
7962 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7963
7964 if (bp->flags & BNXT_FLAG_CHIP_P5)
7965 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7966
7967 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7968 }
7969
7970 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
7971 {
7972 bp->hw_resc.max_irqs = max_irqs;
7973 }
7974
7975 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7976 {
7977 unsigned int cp;
7978
7979 cp = bnxt_get_max_func_cp_rings_for_en(bp);
7980 if (bp->flags & BNXT_FLAG_CHIP_P5)
7981 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
7982 else
7983 return cp - bp->cp_nr_rings;
7984 }
7985
7986 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
7987 {
7988 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
7989 }
7990
7991 int bnxt_get_avail_msix(struct bnxt *bp, int num)
7992 {
7993 int max_cp = bnxt_get_max_func_cp_rings(bp);
7994 int max_irq = bnxt_get_max_func_irqs(bp);
7995 int total_req = bp->cp_nr_rings + num;
7996 int max_idx, avail_msix;
7997
7998 max_idx = bp->total_irqs;
7999 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8000 max_idx = min_t(int, bp->total_irqs, max_cp);
8001 avail_msix = max_idx - bp->cp_nr_rings;
8002 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8003 return avail_msix;
8004
8005 if (max_irq < total_req) {
8006 num = max_irq - bp->cp_nr_rings;
8007 if (num <= 0)
8008 return 0;
8009 }
8010 return num;
8011 }
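
/* Worked example for bnxt_get_avail_msix() under the new resource
 * manager on a P5 chip: with total_irqs = 16, cp_nr_rings = 12 and
 * num = 8, avail_msix = 16 - 12 = 4, which is less than num, so the
 * request is checked against max_irq; if max_irq = 18, total_req = 20
 * exceeds it and num is trimmed to 18 - 12 = 6.
 */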
8012
8013 static int bnxt_get_num_msix(struct bnxt *bp)
8014 {
8015 if (!BNXT_NEW_RM(bp))
8016 return bnxt_get_max_func_irqs(bp);
8017
8018 return bnxt_nq_rings_in_use(bp);
8019 }
8020
8021 static int bnxt_init_msix(struct bnxt *bp)
8022 {
8023 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8024 struct msix_entry *msix_ent;
8025
8026 total_vecs = bnxt_get_num_msix(bp);
8027 max = bnxt_get_max_func_irqs(bp);
8028 if (total_vecs > max)
8029 total_vecs = max;
8030
8031 if (!total_vecs)
8032 return 0;
8033
8034 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8035 if (!msix_ent)
8036 return -ENOMEM;
8037
8038 for (i = 0; i < total_vecs; i++) {
8039 msix_ent[i].entry = i;
8040 msix_ent[i].vector = 0;
8041 }
8042
8043 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8044 min = 2;
8045
8046 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8047 ulp_msix = bnxt_get_ulp_msix_num(bp);
8048 if (total_vecs < 0 || total_vecs < ulp_msix) {
8049 rc = -ENODEV;
8050 goto msix_setup_exit;
8051 }
8052
8053 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8054 if (bp->irq_tbl) {
8055 for (i = 0; i < total_vecs; i++)
8056 bp->irq_tbl[i].vector = msix_ent[i].vector;
8057
8058 bp->total_irqs = total_vecs;
8059 /* Trim rings based on the number of vectors allocated */
8060 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8061 total_vecs - ulp_msix, min == 1);
8062 if (rc)
8063 goto msix_setup_exit;
8064
8065 bp->cp_nr_rings = (min == 1) ?
8066 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8067 bp->tx_nr_rings + bp->rx_nr_rings;
8068
8069 } else {
8070 rc = -ENOMEM;
8071 goto msix_setup_exit;
8072 }
8073 bp->flags |= BNXT_FLAG_USING_MSIX;
8074 kfree(msix_ent);
8075 return 0;
8076
8077 msix_setup_exit:
8078 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8079 kfree(bp->irq_tbl);
8080 bp->irq_tbl = NULL;
8081 pci_disable_msix(bp->pdev);
8082 kfree(msix_ent);
8083 return rc;
8084 }
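
/* A note on the minimum vector count above: when RX and TX completion
 * rings are not shared, at least one RX and one TX vector are needed,
 * so min is raised to 2 before pci_enable_msix_range(). With shared
 * rings a single vector is enough to bring the device up.
 */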
8085
8086 static int bnxt_init_inta(struct bnxt *bp)
8087 {
8088 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
8089 if (!bp->irq_tbl)
8090 return -ENOMEM;
8091
8092 bp->total_irqs = 1;
8093 bp->rx_nr_rings = 1;
8094 bp->tx_nr_rings = 1;
8095 bp->cp_nr_rings = 1;
8096 bp->flags |= BNXT_FLAG_SHARED_RINGS;
8097 bp->irq_tbl[0].vector = bp->pdev->irq;
8098 return 0;
8099 }
8100
8101 static int bnxt_init_int_mode(struct bnxt *bp)
8102 {
8103 int rc = 0;
8104
8105 if (bp->flags & BNXT_FLAG_MSIX_CAP)
8106 rc = bnxt_init_msix(bp);
8107
8108 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8109 /* fallback to INTA */
8110 rc = bnxt_init_inta(bp);
8111 }
8112 return rc;
8113 }
8114
8115 static void bnxt_clear_int_mode(struct bnxt *bp)
8116 {
8117 if (bp->flags & BNXT_FLAG_USING_MSIX)
8118 pci_disable_msix(bp->pdev);
8119
8120 kfree(bp->irq_tbl);
8121 bp->irq_tbl = NULL;
8122 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8123 }
8124
8125 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8126 {
8127 int tcs = netdev_get_num_tc(bp->dev);
8128 bool irq_cleared = false;
8129 int rc;
8130
8131 if (!bnxt_need_reserve_rings(bp))
8132 return 0;
8133
8134 if (irq_re_init && BNXT_NEW_RM(bp) &&
8135 bnxt_get_num_msix(bp) != bp->total_irqs) {
8136 bnxt_ulp_irq_stop(bp);
8137 bnxt_clear_int_mode(bp);
8138 irq_cleared = true;
8139 }
8140 rc = __bnxt_reserve_rings(bp);
8141 if (irq_cleared) {
8142 if (!rc)
8143 rc = bnxt_init_int_mode(bp);
8144 bnxt_ulp_irq_restart(bp, rc);
8145 }
8146 if (rc) {
8147 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8148 return rc;
8149 }
8150 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8151 netdev_err(bp->dev, "tx ring reservation failure\n");
8152 netdev_reset_tc(bp->dev);
8153 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8154 return -ENOMEM;
8155 }
8156 return 0;
8157 }
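
/* Sequencing note for bnxt_reserve_rings(): if the required MSI-X count
 * changed under the new resource manager, interrupts are torn down
 * before the rings are re-reserved and re-initialized afterwards; ULP
 * (e.g. the RDMA driver) IRQs are stopped around this window so stale
 * vectors are not used.
 */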
8158
8159 static void bnxt_free_irq(struct bnxt *bp)
8160 {
8161 struct bnxt_irq *irq;
8162 int i;
8163
8164 #ifdef CONFIG_RFS_ACCEL
8165 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8166 bp->dev->rx_cpu_rmap = NULL;
8167 #endif
8168 if (!bp->irq_tbl || !bp->bnapi)
8169 return;
8170
8171 for (i = 0; i < bp->cp_nr_rings; i++) {
8172 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8173
8174 irq = &bp->irq_tbl[map_idx];
8175 if (irq->requested) {
8176 if (irq->have_cpumask) {
8177 irq_set_affinity_hint(irq->vector, NULL);
8178 free_cpumask_var(irq->cpu_mask);
8179 irq->have_cpumask = 0;
8180 }
8181 free_irq(irq->vector, bp->bnapi[i]);
8182 }
8183
8184 irq->requested = 0;
8185 }
8186 }
8187
8188 static int bnxt_request_irq(struct bnxt *bp)
8189 {
8190 int i, j, rc = 0;
8191 unsigned long flags = 0;
8192 #ifdef CONFIG_RFS_ACCEL
8193 struct cpu_rmap *rmap;
8194 #endif
8195
8196 rc = bnxt_setup_int_mode(bp);
8197 if (rc) {
8198 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8199 rc);
8200 return rc;
8201 }
8202 #ifdef CONFIG_RFS_ACCEL
8203 rmap = bp->dev->rx_cpu_rmap;
8204 #endif
8205 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8206 flags = IRQF_SHARED;
8207
8208 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8209 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8210 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8211
8212 #ifdef CONFIG_RFS_ACCEL
8213 if (rmap && bp->bnapi[i]->rx_ring) {
8214 rc = irq_cpu_rmap_add(rmap, irq->vector);
8215 if (rc)
8216 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
8217 j);
8218 j++;
8219 }
8220 #endif
8221 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8222 bp->bnapi[i]);
8223 if (rc)
8224 break;
8225
8226 irq->requested = 1;
8227
8228 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8229 int numa_node = dev_to_node(&bp->pdev->dev);
8230
8231 irq->have_cpumask = 1;
8232 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8233 irq->cpu_mask);
8234 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8235 if (rc) {
8236 netdev_warn(bp->dev,
8237 "Set affinity failed, IRQ = %d\n",
8238 irq->vector);
8239 break;
8240 }
8241 }
8242 }
8243 return rc;
8244 }
8245
8246 static void bnxt_del_napi(struct bnxt *bp)
8247 {
8248 int i;
8249
8250 if (!bp->bnapi)
8251 return;
8252
8253 for (i = 0; i < bp->cp_nr_rings; i++) {
8254 struct bnxt_napi *bnapi = bp->bnapi[i];
8255
8256 napi_hash_del(&bnapi->napi);
8257 netif_napi_del(&bnapi->napi);
8258 }
8259 /* We called napi_hash_del() before netif_napi_del(), we need
8260 * to respect an RCU grace period before freeing napi structures.
8261 */
8262 synchronize_net();
8263 }
8264
8265 static void bnxt_init_napi(struct bnxt *bp)
8266 {
8267 int i;
8268 unsigned int cp_nr_rings = bp->cp_nr_rings;
8269 struct bnxt_napi *bnapi;
8270
8271 if (bp->flags & BNXT_FLAG_USING_MSIX) {
8272 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8273
8274 if (bp->flags & BNXT_FLAG_CHIP_P5)
8275 poll_fn = bnxt_poll_p5;
8276 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8277 cp_nr_rings--;
8278 for (i = 0; i < cp_nr_rings; i++) {
8279 bnapi = bp->bnapi[i];
8280 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8281 }
8282 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8283 bnapi = bp->bnapi[cp_nr_rings];
8284 netif_napi_add(bp->dev, &bnapi->napi,
8285 bnxt_poll_nitroa0, 64);
8286 }
8287 } else {
8288 bnapi = bp->bnapi[0];
8289 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8290 }
8291 }
8292
8293 static void bnxt_disable_napi(struct bnxt *bp)
8294 {
8295 int i;
8296
8297 if (!bp->bnapi)
8298 return;
8299
8300 for (i = 0; i < bp->cp_nr_rings; i++) {
8301 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8302
8303 if (bp->bnapi[i]->rx_ring)
8304 cancel_work_sync(&cpr->dim.work);
8305
8306 napi_disable(&bp->bnapi[i]->napi);
8307 }
8308 }
8309
8310 static void bnxt_enable_napi(struct bnxt *bp)
8311 {
8312 int i;
8313
8314 for (i = 0; i < bp->cp_nr_rings; i++) {
8315 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8316 bp->bnapi[i]->in_reset = false;
8317
8318 if (bp->bnapi[i]->rx_ring) {
8319 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
8320 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
8321 }
8322 napi_enable(&bp->bnapi[i]->napi);
8323 }
8324 }
8325
8326 void bnxt_tx_disable(struct bnxt *bp)
8327 {
8328 int i;
8329 struct bnxt_tx_ring_info *txr;
8330
8331 if (bp->tx_ring) {
8332 for (i = 0; i < bp->tx_nr_rings; i++) {
8333 txr = &bp->tx_ring[i];
8334 txr->dev_state = BNXT_DEV_STATE_CLOSING;
8335 }
8336 }
8337 /* Stop all TX queues */
8338 netif_tx_disable(bp->dev);
8339 netif_carrier_off(bp->dev);
8340 }
8341
8342 void bnxt_tx_enable(struct bnxt *bp)
8343 {
8344 int i;
8345 struct bnxt_tx_ring_info *txr;
8346
8347 for (i = 0; i < bp->tx_nr_rings; i++) {
8348 txr = &bp->tx_ring[i];
8349 txr->dev_state = 0;
8350 }
8351 netif_tx_wake_all_queues(bp->dev);
8352 if (bp->link_info.link_up)
8353 netif_carrier_on(bp->dev);
8354 }
8355
8356 static void bnxt_report_link(struct bnxt *bp)
8357 {
8358 if (bp->link_info.link_up) {
8359 const char *duplex;
8360 const char *flow_ctrl;
8361 u32 speed;
8362 u16 fec;
8363
8364 netif_carrier_on(bp->dev);
8365 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
8366 duplex = "full";
8367 else
8368 duplex = "half";
8369 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
8370 flow_ctrl = "ON - receive & transmit";
8371 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
8372 flow_ctrl = "ON - transmit";
8373 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
8374 flow_ctrl = "ON - receive";
8375 else
8376 flow_ctrl = "none";
8377 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
8378 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
8379 speed, duplex, flow_ctrl);
8380 if (bp->flags & BNXT_FLAG_EEE_CAP)
8381 netdev_info(bp->dev, "EEE is %s\n",
8382 bp->eee.eee_active ? "active" :
8383 "not active");
8384 fec = bp->link_info.fec_cfg;
8385 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
8386 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
8387 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
8388 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
8389 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
8390 } else {
8391 netif_carrier_off(bp->dev);
8392 netdev_err(bp->dev, "NIC Link is Down\n");
8393 }
8394 }
8395
8396 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
8397 {
8398 int rc = 0;
8399 struct hwrm_port_phy_qcaps_input req = {0};
8400 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8401 struct bnxt_link_info *link_info = &bp->link_info;
8402
8403 bp->flags &= ~BNXT_FLAG_EEE_CAP;
8404 if (bp->test_info)
8405 bp->test_info->flags &= ~BNXT_TEST_FL_EXT_LPBK;
8406 if (bp->hwrm_spec_code < 0x10201)
8407 return 0;
8408
8409 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
8410
8411 mutex_lock(&bp->hwrm_cmd_lock);
8412 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8413 if (rc)
8414 goto hwrm_phy_qcaps_exit;
8415
8416 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
8417 struct ethtool_eee *eee = &bp->eee;
8418 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
8419
8420 bp->flags |= BNXT_FLAG_EEE_CAP;
8421 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8422 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
8423 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
8424 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
8425 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
8426 }
8427 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
8428 if (bp->test_info)
8429 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
8430 }
8431 if (resp->supported_speeds_auto_mode)
8432 link_info->support_auto_speeds =
8433 le16_to_cpu(resp->supported_speeds_auto_mode);
8434
8435 bp->port_count = resp->port_cnt;
8436
8437 hwrm_phy_qcaps_exit:
8438 mutex_unlock(&bp->hwrm_cmd_lock);
8439 return rc;
8440 }
8441
8442 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
8443 {
8444 int rc = 0;
8445 struct bnxt_link_info *link_info = &bp->link_info;
8446 struct hwrm_port_phy_qcfg_input req = {0};
8447 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8448 u8 link_up = link_info->link_up;
8449 u16 diff;
8450
8451 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
8452
8453 mutex_lock(&bp->hwrm_cmd_lock);
8454 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8455 if (rc) {
8456 mutex_unlock(&bp->hwrm_cmd_lock);
8457 return rc;
8458 }
8459
8460 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
8461 link_info->phy_link_status = resp->link;
8462 link_info->duplex = resp->duplex_cfg;
8463 if (bp->hwrm_spec_code >= 0x10800)
8464 link_info->duplex = resp->duplex_state;
8465 link_info->pause = resp->pause;
8466 link_info->auto_mode = resp->auto_mode;
8467 link_info->auto_pause_setting = resp->auto_pause;
8468 link_info->lp_pause = resp->link_partner_adv_pause;
8469 link_info->force_pause_setting = resp->force_pause;
8470 link_info->duplex_setting = resp->duplex_cfg;
8471 if (link_info->phy_link_status == BNXT_LINK_LINK)
8472 link_info->link_speed = le16_to_cpu(resp->link_speed);
8473 else
8474 link_info->link_speed = 0;
8475 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
8476 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
8477 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
8478 link_info->lp_auto_link_speeds =
8479 le16_to_cpu(resp->link_partner_adv_speeds);
8480 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
8481 link_info->phy_ver[0] = resp->phy_maj;
8482 link_info->phy_ver[1] = resp->phy_min;
8483 link_info->phy_ver[2] = resp->phy_bld;
8484 link_info->media_type = resp->media_type;
8485 link_info->phy_type = resp->phy_type;
8486 link_info->transceiver = resp->xcvr_pkg_type;
8487 link_info->phy_addr = resp->eee_config_phy_addr &
8488 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
8489 link_info->module_status = resp->module_status;
8490
8491 if (bp->flags & BNXT_FLAG_EEE_CAP) {
8492 struct ethtool_eee *eee = &bp->eee;
8493 u16 fw_speeds;
8494
8495 eee->eee_active = 0;
8496 if (resp->eee_config_phy_addr &
8497 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
8498 eee->eee_active = 1;
8499 fw_speeds = le16_to_cpu(
8500 resp->link_partner_adv_eee_link_speed_mask);
8501 eee->lp_advertised =
8502 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8503 }
8504
8505 /* Pull initial EEE config */
8506 if (!chng_link_state) {
8507 if (resp->eee_config_phy_addr &
8508 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
8509 eee->eee_enabled = 1;
8510
8511 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
8512 eee->advertised =
8513 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8514
8515 if (resp->eee_config_phy_addr &
8516 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
8517 __le32 tmr;
8518
8519 eee->tx_lpi_enabled = 1;
8520 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
8521 eee->tx_lpi_timer = le32_to_cpu(tmr) &
8522 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
8523 }
8524 }
8525 }
8526
8527 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8528 if (bp->hwrm_spec_code >= 0x10504)
8529 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8530
8531 /* TODO: need to add more logic to report VF link */
8532 if (chng_link_state) {
8533 if (link_info->phy_link_status == BNXT_LINK_LINK)
8534 link_info->link_up = 1;
8535 else
8536 link_info->link_up = 0;
8537 if (link_up != link_info->link_up)
8538 bnxt_report_link(bp);
8539 } else {
8540 /* always link down if not required to update link state */
8541 link_info->link_up = 0;
8542 }
8543 mutex_unlock(&bp->hwrm_cmd_lock);
8544
8545 if (!BNXT_SINGLE_PF(bp))
8546 return 0;
8547
8548 diff = link_info->support_auto_speeds ^ link_info->advertising;
8549 if ((link_info->support_auto_speeds | diff) !=
8550 link_info->support_auto_speeds) {
8551 /* An advertised speed is no longer supported, so we need to
8552 * update the advertisement settings. Caller holds RTNL
8553 * so we can modify link settings.
8554 */
8555 link_info->advertising = link_info->support_auto_speeds;
8556 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
8557 bnxt_hwrm_set_link_setting(bp, true, false);
8558 }
8559 return 0;
8560 }
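
/* Example of the advertised-speed check above: diff isolates the bits
 * that differ between support_auto_speeds and advertising, and
 * (support | diff) != support is true only when advertising contains a
 * bit that is not supported, e.g. support = 0x00f0 and advertising =
 * 0x01f0 give diff = 0x0100 and trigger the re-advertisement.
 */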
8561
8562 static void bnxt_get_port_module_status(struct bnxt *bp)
8563 {
8564 struct bnxt_link_info *link_info = &bp->link_info;
8565 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
8566 u8 module_status;
8567
8568 if (bnxt_update_link(bp, true))
8569 return;
8570
8571 module_status = link_info->module_status;
8572 switch (module_status) {
8573 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
8574 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
8575 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
8576 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
8577 bp->pf.port_id);
8578 if (bp->hwrm_spec_code >= 0x10201) {
8579 netdev_warn(bp->dev, "Module part number %s\n",
8580 resp->phy_vendor_partnumber);
8581 }
8582 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
8583 netdev_warn(bp->dev, "TX is disabled\n");
8584 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
8585 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
8586 }
8587 }
8588
8589 static void
8590 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
8591 {
8592 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
8593 if (bp->hwrm_spec_code >= 0x10201)
8594 req->auto_pause =
8595 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
8596 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8597 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
8598 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8599 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
8600 req->enables |=
8601 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8602 } else {
8603 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8604 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
8605 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8606 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
8607 req->enables |=
8608 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
8609 if (bp->hwrm_spec_code >= 0x10201) {
8610 req->auto_pause = req->force_pause;
8611 req->enables |= cpu_to_le32(
8612 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8613 }
8614 }
8615 }
8616
8617 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
8618 struct hwrm_port_phy_cfg_input *req)
8619 {
8620 u8 autoneg = bp->link_info.autoneg;
8621 u16 fw_link_speed = bp->link_info.req_link_speed;
8622 u16 advertising = bp->link_info.advertising;
8623
8624 if (autoneg & BNXT_AUTONEG_SPEED) {
8625 req->auto_mode |=
8626 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
8627
8628 req->enables |= cpu_to_le32(
8629 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
8630 req->auto_link_speed_mask = cpu_to_le16(advertising);
8631
8632 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
8633 req->flags |=
8634 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
8635 } else {
8636 req->force_link_speed = cpu_to_le16(fw_link_speed);
8637 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
8638 }
8639
8640 /* tell chimp that the setting takes effect immediately */
8641 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
8642 }
8643
8644 int bnxt_hwrm_set_pause(struct bnxt *bp)
8645 {
8646 struct hwrm_port_phy_cfg_input req = {0};
8647 int rc;
8648
8649 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8650 bnxt_hwrm_set_pause_common(bp, &req);
8651
8652 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8653 bp->link_info.force_link_chng)
8654 bnxt_hwrm_set_link_common(bp, &req);
8655
8656 mutex_lock(&bp->hwrm_cmd_lock);
8657 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8658 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
8659 /* since changing the pause setting doesn't trigger any link
8660 * change event, the driver needs to update the current pause
8661 * result upon successful return of the phy_cfg command
8662 */
8663 bp->link_info.pause =
8664 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8665 bp->link_info.auto_pause_setting = 0;
8666 if (!bp->link_info.force_link_chng)
8667 bnxt_report_link(bp);
8668 }
8669 bp->link_info.force_link_chng = false;
8670 mutex_unlock(&bp->hwrm_cmd_lock);
8671 return rc;
8672 }
8673
8674 static void bnxt_hwrm_set_eee(struct bnxt *bp,
8675 struct hwrm_port_phy_cfg_input *req)
8676 {
8677 struct ethtool_eee *eee = &bp->eee;
8678
8679 if (eee->eee_enabled) {
8680 u16 eee_speeds;
8681 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8682
8683 if (eee->tx_lpi_enabled)
8684 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8685 else
8686 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8687
8688 req->flags |= cpu_to_le32(flags);
8689 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8690 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8691 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8692 } else {
8693 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8694 }
8695 }
8696
8697 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
8698 {
8699 struct hwrm_port_phy_cfg_input req = {0};
8700
8701 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8702 if (set_pause)
8703 bnxt_hwrm_set_pause_common(bp, &req);
8704
8705 bnxt_hwrm_set_link_common(bp, &req);
8706
8707 if (set_eee)
8708 bnxt_hwrm_set_eee(bp, &req);
8709 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8710 }
8711
8712 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8713 {
8714 struct hwrm_port_phy_cfg_input req = {0};
8715
8716 if (!BNXT_SINGLE_PF(bp))
8717 return 0;
8718
8719 if (pci_num_vf(bp->pdev))
8720 return 0;
8721
8722 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8723 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
8724 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8725 }
8726
8727 static int bnxt_fw_init_one(struct bnxt *bp);
8728
8729 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8730 {
8731 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8732 struct hwrm_func_drv_if_change_input req = {0};
8733 bool resc_reinit = false, fw_reset = false;
8734 u32 flags = 0;
8735 int rc;
8736
8737 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8738 return 0;
8739
8740 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8741 if (up)
8742 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8743 mutex_lock(&bp->hwrm_cmd_lock);
8744 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8745 if (!rc)
8746 flags = le32_to_cpu(resp->flags);
8747 mutex_unlock(&bp->hwrm_cmd_lock);
8748 if (rc)
8749 return rc;
8750
8751 if (!up)
8752 return 0;
8753
8754 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
8755 resc_reinit = true;
8756 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
8757 fw_reset = true;
8758
8759 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
8760 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
8761 return -ENODEV;
8762 }
8763 if (resc_reinit || fw_reset) {
8764 if (fw_reset) {
8765 rc = bnxt_fw_init_one(bp);
8766 if (rc) {
8767 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
8768 return rc;
8769 }
8770 bnxt_clear_int_mode(bp);
8771 rc = bnxt_init_int_mode(bp);
8772 if (rc) {
8773 netdev_err(bp->dev, "init int mode failed\n");
8774 return rc;
8775 }
8776 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
8777 }
8778 if (BNXT_NEW_RM(bp)) {
8779 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8780
8781 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8782 hw_resc->resv_cp_rings = 0;
8783 hw_resc->resv_stat_ctxs = 0;
8784 hw_resc->resv_irqs = 0;
8785 hw_resc->resv_tx_rings = 0;
8786 hw_resc->resv_rx_rings = 0;
8787 hw_resc->resv_hw_ring_grps = 0;
8788 hw_resc->resv_vnics = 0;
8789 if (!fw_reset) {
8790 bp->tx_nr_rings = 0;
8791 bp->rx_nr_rings = 0;
8792 }
8793 }
8794 }
8795 return 0;
8796 }
8797
8798 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8799 {
8800 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8801 struct hwrm_port_led_qcaps_input req = {0};
8802 struct bnxt_pf_info *pf = &bp->pf;
8803 int rc;
8804
8805 bp->num_leds = 0;
8806 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8807 return 0;
8808
8809 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8810 req.port_id = cpu_to_le16(pf->port_id);
8811 mutex_lock(&bp->hwrm_cmd_lock);
8812 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8813 if (rc) {
8814 mutex_unlock(&bp->hwrm_cmd_lock);
8815 return rc;
8816 }
8817 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8818 int i;
8819
8820 bp->num_leds = resp->num_leds;
8821 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8822 bp->num_leds);
8823 for (i = 0; i < bp->num_leds; i++) {
8824 struct bnxt_led_info *led = &bp->leds[i];
8825 __le16 caps = led->led_state_caps;
8826
8827 if (!led->led_group_id ||
8828 !BNXT_LED_ALT_BLINK_CAP(caps)) {
8829 bp->num_leds = 0;
8830 break;
8831 }
8832 }
8833 }
8834 mutex_unlock(&bp->hwrm_cmd_lock);
8835 return 0;
8836 }
8837
8838 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8839 {
8840 struct hwrm_wol_filter_alloc_input req = {0};
8841 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8842 int rc;
8843
8844 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8845 req.port_id = cpu_to_le16(bp->pf.port_id);
8846 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8847 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8848 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8849 mutex_lock(&bp->hwrm_cmd_lock);
8850 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8851 if (!rc)
8852 bp->wol_filter_id = resp->wol_filter_id;
8853 mutex_unlock(&bp->hwrm_cmd_lock);
8854 return rc;
8855 }
8856
8857 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8858 {
8859 struct hwrm_wol_filter_free_input req = {0};
8860 int rc;
8861
8862 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8863 req.port_id = cpu_to_le16(bp->pf.port_id);
8864 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8865 req.wol_filter_id = bp->wol_filter_id;
8866 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8867 return rc;
8868 }
8869
8870 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8871 {
8872 struct hwrm_wol_filter_qcfg_input req = {0};
8873 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8874 u16 next_handle = 0;
8875 int rc;
8876
8877 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8878 req.port_id = cpu_to_le16(bp->pf.port_id);
8879 req.handle = cpu_to_le16(handle);
8880 mutex_lock(&bp->hwrm_cmd_lock);
8881 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8882 if (!rc) {
8883 next_handle = le16_to_cpu(resp->next_handle);
8884 if (next_handle != 0) {
8885 if (resp->wol_type ==
8886 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8887 bp->wol = 1;
8888 bp->wol_filter_id = resp->wol_filter_id;
8889 }
8890 }
8891 }
8892 mutex_unlock(&bp->hwrm_cmd_lock);
8893 return next_handle;
8894 }
8895
8896 static void bnxt_get_wol_settings(struct bnxt *bp)
8897 {
8898 u16 handle = 0;
8899
8900 bp->wol = 0;
8901 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8902 return;
8903
8904 do {
8905 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8906 } while (handle && handle != 0xffff);
8907 }
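
/* The firmware returns WoL filters as a walk: each query returns the
 * next handle and the list ends at 0 or 0xffff. Only a magic-packet
 * filter sets bp->wol here.
 */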
8908
8909 #ifdef CONFIG_BNXT_HWMON
8910 static ssize_t bnxt_show_temp(struct device *dev,
8911 struct device_attribute *devattr, char *buf)
8912 {
8913 struct hwrm_temp_monitor_query_input req = {0};
8914 struct hwrm_temp_monitor_query_output *resp;
8915 struct bnxt *bp = dev_get_drvdata(dev);
8916 u32 temp = 0;
8917
8918 resp = bp->hwrm_cmd_resp_addr;
8919 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8920 mutex_lock(&bp->hwrm_cmd_lock);
8921 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8922 temp = resp->temp * 1000; /* display millidegree */
8923 mutex_unlock(&bp->hwrm_cmd_lock);
8924
8925 return sprintf(buf, "%u\n", temp);
8926 }
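
/* The hwmon sysfs ABI reports temperatures in millidegrees Celsius,
 * which is why the firmware's reading in degrees is multiplied by 1000
 * before being printed to temp1_input.
 */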
8927 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8928
8929 static struct attribute *bnxt_attrs[] = {
8930 &sensor_dev_attr_temp1_input.dev_attr.attr,
8931 NULL
8932 };
8933 ATTRIBUTE_GROUPS(bnxt);
8934
8935 static void bnxt_hwmon_close(struct bnxt *bp)
8936 {
8937 if (bp->hwmon_dev) {
8938 hwmon_device_unregister(bp->hwmon_dev);
8939 bp->hwmon_dev = NULL;
8940 }
8941 }
8942
8943 static void bnxt_hwmon_open(struct bnxt *bp)
8944 {
8945 struct pci_dev *pdev = bp->pdev;
8946
8947 if (bp->hwmon_dev)
8948 return;
8949
8950 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8951 DRV_MODULE_NAME, bp,
8952 bnxt_groups);
8953 if (IS_ERR(bp->hwmon_dev)) {
8954 bp->hwmon_dev = NULL;
8955 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8956 }
8957 }
8958 #else
8959 static void bnxt_hwmon_close(struct bnxt *bp)
8960 {
8961 }
8962
8963 static void bnxt_hwmon_open(struct bnxt *bp)
8964 {
8965 }
8966 #endif
8967
8968 static bool bnxt_eee_config_ok(struct bnxt *bp)
8969 {
8970 struct ethtool_eee *eee = &bp->eee;
8971 struct bnxt_link_info *link_info = &bp->link_info;
8972
8973 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
8974 return true;
8975
8976 if (eee->eee_enabled) {
8977 u32 advertising =
8978 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
8979
8980 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8981 eee->eee_enabled = 0;
8982 return false;
8983 }
8984 if (eee->advertised & ~advertising) {
8985 eee->advertised = advertising & eee->supported;
8986 return false;
8987 }
8988 }
8989 return true;
8990 }
8991
8992 static int bnxt_update_phy_setting(struct bnxt *bp)
8993 {
8994 int rc;
8995 bool update_link = false;
8996 bool update_pause = false;
8997 bool update_eee = false;
8998 struct bnxt_link_info *link_info = &bp->link_info;
8999
9000 rc = bnxt_update_link(bp, true);
9001 if (rc) {
9002 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9003 rc);
9004 return rc;
9005 }
9006 if (!BNXT_SINGLE_PF(bp))
9007 return 0;
9008
9009 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9010 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9011 link_info->req_flow_ctrl)
9012 update_pause = true;
9013 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9014 link_info->force_pause_setting != link_info->req_flow_ctrl)
9015 update_pause = true;
9016 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9017 if (BNXT_AUTO_MODE(link_info->auto_mode))
9018 update_link = true;
9019 if (link_info->req_link_speed != link_info->force_link_speed)
9020 update_link = true;
9021 if (link_info->req_duplex != link_info->duplex_setting)
9022 update_link = true;
9023 } else {
9024 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9025 update_link = true;
9026 if (link_info->advertising != link_info->auto_link_speeds)
9027 update_link = true;
9028 }
9029
9030 /* The last close may have shut down the link, so we need to call
9031 * PHY_CFG to bring it back up.
9032 */
9033 if (!netif_carrier_ok(bp->dev))
9034 update_link = true;
9035
9036 if (!bnxt_eee_config_ok(bp))
9037 update_eee = true;
9038
9039 if (update_link)
9040 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9041 else if (update_pause)
9042 rc = bnxt_hwrm_set_pause(bp);
9043 if (rc) {
9044 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9045 rc);
9046 return rc;
9047 }
9048
9049 return rc;
9050 }
9051
9052 /* Common routine to pre-map certain register block to different GRC window.
9053 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
9054 * in the PF and 3 windows in the VF can be customized to map in different
9055 * register blocks.
9056 */
9057 static void bnxt_preset_reg_win(struct bnxt *bp)
9058 {
9059 if (BNXT_PF(bp)) {
9060 /* CAG registers map to GRC window #4 */
9061 writel(BNXT_CAG_REG_BASE,
9062 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9063 }
9064 }
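
/* The +12 offset above is consistent with 4-byte window registers
 * numbered from 1: window N would be programmed at
 * BNXT_GRCPF_REG_WINDOW_BASE_OUT + (N - 1) * 4, putting window #4 at
 * offset 12.
 */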
9065
9066 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9067
9068 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9069 {
9070 int rc = 0;
9071
9072 bnxt_preset_reg_win(bp);
9073 netif_carrier_off(bp->dev);
9074 if (irq_re_init) {
9075 /* Reserve rings now if none were reserved at driver probe. */
9076 rc = bnxt_init_dflt_ring_mode(bp);
9077 if (rc) {
9078 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9079 return rc;
9080 }
9081 }
9082 rc = bnxt_reserve_rings(bp, irq_re_init);
9083 if (rc)
9084 return rc;
9085 if ((bp->flags & BNXT_FLAG_RFS) &&
9086 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9087 /* disable RFS if falling back to INTA */
9088 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9089 bp->flags &= ~BNXT_FLAG_RFS;
9090 }
9091
9092 rc = bnxt_alloc_mem(bp, irq_re_init);
9093 if (rc) {
9094 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9095 goto open_err_free_mem;
9096 }
9097
9098 if (irq_re_init) {
9099 bnxt_init_napi(bp);
9100 rc = bnxt_request_irq(bp);
9101 if (rc) {
9102 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
9103 goto open_err_irq;
9104 }
9105 }
9106
9107 bnxt_enable_napi(bp);
9108 bnxt_debug_dev_init(bp);
9109
9110 rc = bnxt_init_nic(bp, irq_re_init);
9111 if (rc) {
9112 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9113 goto open_err;
9114 }
9115
9116 if (link_re_init) {
9117 mutex_lock(&bp->link_lock);
9118 rc = bnxt_update_phy_setting(bp);
9119 mutex_unlock(&bp->link_lock);
9120 if (rc) {
9121 netdev_warn(bp->dev, "failed to update phy settings\n");
9122 if (BNXT_SINGLE_PF(bp)) {
9123 bp->link_info.phy_retry = true;
9124 bp->link_info.phy_retry_expires =
9125 jiffies + 5 * HZ;
9126 }
9127 }
9128 }
9129
9130 if (irq_re_init)
9131 udp_tunnel_get_rx_info(bp->dev);
9132
9133 set_bit(BNXT_STATE_OPEN, &bp->state);
9134 bnxt_enable_int(bp);
9135 /* Enable TX queues */
9136 bnxt_tx_enable(bp);
9137 mod_timer(&bp->timer, jiffies + bp->current_interval);
9138 /* Poll link status and check for SFP+ module status */
9139 bnxt_get_port_module_status(bp);
9140
9141 /* VF-reps may need to be re-opened after the PF is re-opened */
9142 if (BNXT_PF(bp))
9143 bnxt_vf_reps_open(bp);
9144 return 0;
9145
9146 open_err:
9147 bnxt_debug_dev_exit(bp);
9148 bnxt_disable_napi(bp);
9149
9150 open_err_irq:
9151 bnxt_del_napi(bp);
9152
9153 open_err_free_mem:
9154 bnxt_free_skbs(bp);
9155 bnxt_free_irq(bp);
9156 bnxt_free_mem(bp, true);
9157 return rc;
9158 }
9159
9160 /* rtnl_lock held */
9161 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9162 {
9163 int rc = 0;
9164
9165 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9166 if (rc) {
9167 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9168 dev_close(bp->dev);
9169 }
9170 return rc;
9171 }
9172
9173 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
9174 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
9175 * self tests.
9176 */
9177 int bnxt_half_open_nic(struct bnxt *bp)
9178 {
9179 int rc = 0;
9180
9181 rc = bnxt_alloc_mem(bp, false);
9182 if (rc) {
9183 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9184 goto half_open_err;
9185 }
9186 rc = bnxt_init_nic(bp, false);
9187 if (rc) {
9188 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9189 goto half_open_err;
9190 }
9191 return 0;
9192
9193 half_open_err:
9194 bnxt_free_skbs(bp);
9195 bnxt_free_mem(bp, false);
9196 dev_close(bp->dev);
9197 return rc;
9198 }
9199
9200 /* rtnl_lock held, this call can only be made after a previous successful
9201 * call to bnxt_half_open_nic().
9202 */
9203 void bnxt_half_close_nic(struct bnxt *bp)
9204 {
9205 bnxt_hwrm_resource_free(bp, false, false);
9206 bnxt_free_skbs(bp);
9207 bnxt_free_mem(bp, false);
9208 }
9209
9210 static int bnxt_open(struct net_device *dev)
9211 {
9212 struct bnxt *bp = netdev_priv(dev);
9213 int rc;
9214
9215 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9216 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9217 return -ENODEV;
9218 }
9219
9220 rc = bnxt_hwrm_if_change(bp, true);
9221 if (rc)
9222 return rc;
9223 rc = __bnxt_open_nic(bp, true, true);
9224 if (rc) {
9225 bnxt_hwrm_if_change(bp, false);
9226 } else {
9227 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state) &&
9228 BNXT_PF(bp)) {
9229 struct bnxt_pf_info *pf = &bp->pf;
9230 int n = pf->active_vfs;
9231
9232 if (n)
9233 bnxt_cfg_hw_sriov(bp, &n, true);
9234 }
9235 bnxt_hwmon_open(bp);
9236 }
9237
9238 return rc;
9239 }
9240
9241 static bool bnxt_drv_busy(struct bnxt *bp)
9242 {
9243 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
9244 test_bit(BNXT_STATE_READ_STATS, &bp->state));
9245 }
9246
9247 static void bnxt_get_ring_stats(struct bnxt *bp,
9248 struct rtnl_link_stats64 *stats);
9249
9250 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
9251 bool link_re_init)
9252 {
9253 /* Close the VF-reps before closing PF */
9254 if (BNXT_PF(bp))
9255 bnxt_vf_reps_close(bp);
9256
9257 /* Change device state to avoid TX queue wake-ups */
9258 bnxt_tx_disable(bp);
9259
9260 clear_bit(BNXT_STATE_OPEN, &bp->state);
9261 smp_mb__after_atomic();
9262 while (bnxt_drv_busy(bp))
9263 msleep(20);
9264
9265 /* Flush rings and disable interrupts */
9266 bnxt_shutdown_nic(bp, irq_re_init);
9267
9268 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
9269
9270 bnxt_debug_dev_exit(bp);
9271 bnxt_disable_napi(bp);
9272 del_timer_sync(&bp->timer);
9273 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
9274 pci_is_enabled(bp->pdev))
9275 pci_disable_device(bp->pdev);
9276
9277 bnxt_free_skbs(bp);
9278
9279 /* Save ring stats before shutdown */
9280 if (bp->bnapi)
9281 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
9282 if (irq_re_init) {
9283 bnxt_free_irq(bp);
9284 bnxt_del_napi(bp);
9285 }
9286 bnxt_free_mem(bp, irq_re_init);
9287 }
9288
9289 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9290 {
9291 int rc = 0;
9292
9293 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9294 /* If we get here, it means firmware reset is in progress
9295 * while we are trying to close. We can safely proceed with
9296 * the close because we are holding rtnl_lock(). Some firmware
9297 * messages may fail as we proceed to close. We set the
9298 * ABORT_ERR flag here so that the FW reset thread will later
9299 * abort when it gets the rtnl_lock() and sees the flag.
9300 */
9301 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
9302 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9303 }
9304
9305 #ifdef CONFIG_BNXT_SRIOV
9306 if (bp->sriov_cfg) {
9307 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
9308 !bp->sriov_cfg,
9309 BNXT_SRIOV_CFG_WAIT_TMO);
9310 if (rc)
9311 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
9312 }
9313 #endif
9314 __bnxt_close_nic(bp, irq_re_init, link_re_init);
9315 return rc;
9316 }
9317
9318 static int bnxt_close(struct net_device *dev)
9319 {
9320 struct bnxt *bp = netdev_priv(dev);
9321
9322 bnxt_hwmon_close(bp);
9323 bnxt_close_nic(bp, true, true);
9324 bnxt_hwrm_shutdown_link(bp);
9325 bnxt_hwrm_if_change(bp, false);
9326 return 0;
9327 }
9328
9329 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
9330 u16 *val)
9331 {
9332 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
9333 struct hwrm_port_phy_mdio_read_input req = {0};
9334 int rc;
9335
9336 if (bp->hwrm_spec_code < 0x10a00)
9337 return -EOPNOTSUPP;
9338
9339 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
9340 req.port_id = cpu_to_le16(bp->pf.port_id);
9341 req.phy_addr = phy_addr;
9342 req.reg_addr = cpu_to_le16(reg & 0x1f);
9343 if (mdio_phy_id_is_c45(phy_addr)) {
9344 req.cl45_mdio = 1;
9345 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9346 req.dev_addr = mdio_phy_id_devad(phy_addr);
9347 req.reg_addr = cpu_to_le16(reg);
9348 }
9349
9350 mutex_lock(&bp->hwrm_cmd_lock);
9351 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9352 if (!rc)
9353 *val = le16_to_cpu(resp->reg_data);
9354 mutex_unlock(&bp->hwrm_cmd_lock);
9355 return rc;
9356 }
9357
9358 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
9359 u16 val)
9360 {
9361 struct hwrm_port_phy_mdio_write_input req = {0};
9362
9363 if (bp->hwrm_spec_code < 0x10a00)
9364 return -EOPNOTSUPP;
9365
9366 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
9367 req.port_id = cpu_to_le16(bp->pf.port_id);
9368 req.phy_addr = phy_addr;
9369 req.reg_addr = cpu_to_le16(reg & 0x1f);
9370 if (mdio_phy_id_is_c45(phy_addr)) {
9371 req.cl45_mdio = 1;
9372 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9373 req.dev_addr = mdio_phy_id_devad(phy_addr);
9374 req.reg_addr = cpu_to_le16(reg);
9375 }
9376 req.reg_data = cpu_to_le16(val);
9377
9378 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9379 }
9380
9381 /* rtnl_lock held */
9382 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9383 {
9384 struct mii_ioctl_data *mdio = if_mii(ifr);
9385 struct bnxt *bp = netdev_priv(dev);
9386 int rc;
9387
9388 switch (cmd) {
9389 case SIOCGMIIPHY:
9390 mdio->phy_id = bp->link_info.phy_addr;
9391
9392 /* fall through */
9393 case SIOCGMIIREG: {
9394 u16 mii_regval = 0;
9395
9396 if (!netif_running(dev))
9397 return -EAGAIN;
9398
9399 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
9400 &mii_regval);
9401 mdio->val_out = mii_regval;
9402 return rc;
9403 }
9404
9405 case SIOCSMIIREG:
9406 if (!netif_running(dev))
9407 return -EAGAIN;
9408
9409 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
9410 mdio->val_in);
9411
9412 default:
9413 /* do nothing */
9414 break;
9415 }
9416 return -EOPNOTSUPP;
9417 }
9418
9419 static void bnxt_get_ring_stats(struct bnxt *bp,
9420 struct rtnl_link_stats64 *stats)
9421 {
9422 int i;
9423
9425 for (i = 0; i < bp->cp_nr_rings; i++) {
9426 struct bnxt_napi *bnapi = bp->bnapi[i];
9427 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9428 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
9429
9430 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
9431 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
9432 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
9433
9434 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
9435 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
9436 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
9437
9438 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
9439 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
9440 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
9441
9442 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
9443 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
9444 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
9445
9446 stats->rx_missed_errors +=
9447 le64_to_cpu(hw_stats->rx_discard_pkts);
9448
9449 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
9450
9451 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
9452 }
9453 }
9454
9455 static void bnxt_add_prev_stats(struct bnxt *bp,
9456 struct rtnl_link_stats64 *stats)
9457 {
9458 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
9459
9460 stats->rx_packets += prev_stats->rx_packets;
9461 stats->tx_packets += prev_stats->tx_packets;
9462 stats->rx_bytes += prev_stats->rx_bytes;
9463 stats->tx_bytes += prev_stats->tx_bytes;
9464 stats->rx_missed_errors += prev_stats->rx_missed_errors;
9465 stats->multicast += prev_stats->multicast;
9466 stats->tx_dropped += prev_stats->tx_dropped;
9467 }
9468
9469 static void
9470 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
9471 {
9472 struct bnxt *bp = netdev_priv(dev);
9473
9474 set_bit(BNXT_STATE_READ_STATS, &bp->state);
9475 /* Make sure bnxt_close_nic() sees that we are reading stats before
9476 * we check the BNXT_STATE_OPEN flag.
9477 */
9478 smp_mb__after_atomic();
9479 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9480 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9481 *stats = bp->net_stats_prev;
9482 return;
9483 }
9484
9485 bnxt_get_ring_stats(bp, stats);
9486 bnxt_add_prev_stats(bp, stats);
9487
9488 if (bp->flags & BNXT_FLAG_PORT_STATS) {
9489 struct rx_port_stats *rx = bp->hw_rx_port_stats;
9490 struct tx_port_stats *tx = bp->hw_tx_port_stats;
9491
9492 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
9493 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
9494 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
9495 le64_to_cpu(rx->rx_ovrsz_frames) +
9496 le64_to_cpu(rx->rx_runt_frames);
9497 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
9498 le64_to_cpu(rx->rx_jbr_frames);
9499 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
9500 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
9501 stats->tx_errors = le64_to_cpu(tx->tx_err);
9502 }
9503 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9504 }
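/* The BNXT_STATE_READ_STATS / BNXT_STATE_OPEN bits above pair with the
 * barriers in __bnxt_close_nic(): the reader sets READ_STATS, issues
 * smp_mb__after_atomic(), then tests OPEN; the closer clears OPEN, issues
 * the same barrier, then spins on bnxt_drv_busy() until READ_STATS (and
 * the other busy bits) drop.  A condensed, illustrative view:
 *
 *	stats reader			__bnxt_close_nic()
 *	------------			------------------
 *	set_bit(READ_STATS)		clear_bit(OPEN)
 *	smp_mb__after_atomic()		smp_mb__after_atomic()
 *	if (!test_bit(OPEN))		while (bnxt_drv_busy(bp))
 *		use net_stats_prev		msleep(20)
 */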
9505
9506 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
9507 {
9508 struct net_device *dev = bp->dev;
9509 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9510 struct netdev_hw_addr *ha;
9511 u8 *haddr;
9512 int mc_count = 0;
9513 bool update = false;
9514 int off = 0;
9515
9516 netdev_for_each_mc_addr(ha, dev) {
9517 if (mc_count >= BNXT_MAX_MC_ADDRS) {
9518 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9519 vnic->mc_list_count = 0;
9520 return false;
9521 }
9522 haddr = ha->addr;
9523 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
9524 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
9525 update = true;
9526 }
9527 off += ETH_ALEN;
9528 mc_count++;
9529 }
9530 if (mc_count)
9531 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
9532
9533 if (mc_count != vnic->mc_list_count) {
9534 vnic->mc_list_count = mc_count;
9535 update = true;
9536 }
9537 return update;
9538 }
9539
9540 static bool bnxt_uc_list_updated(struct bnxt *bp)
9541 {
9542 struct net_device *dev = bp->dev;
9543 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9544 struct netdev_hw_addr *ha;
9545 int off = 0;
9546
9547 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
9548 return true;
9549
9550 netdev_for_each_uc_addr(ha, dev) {
9551 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
9552 return true;
9553
9554 off += ETH_ALEN;
9555 }
9556 return false;
9557 }
9558
9559 static void bnxt_set_rx_mode(struct net_device *dev)
9560 {
9561 struct bnxt *bp = netdev_priv(dev);
9562 struct bnxt_vnic_info *vnic;
9563 bool mc_update = false;
9564 bool uc_update;
9565 u32 mask;
9566
9567 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
9568 return;
9569
9570 vnic = &bp->vnic_info[0];
9571 mask = vnic->rx_mask;
9572 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
9573 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
9574 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
9575 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
9576
9577 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
9578 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9579
9580 uc_update = bnxt_uc_list_updated(bp);
9581
9582 if (dev->flags & IFF_BROADCAST)
9583 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
9584 if (dev->flags & IFF_ALLMULTI) {
9585 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9586 vnic->mc_list_count = 0;
9587 } else {
9588 mc_update = bnxt_mc_list_updated(bp, &mask);
9589 }
9590
9591 if (mask != vnic->rx_mask || uc_update || mc_update) {
9592 vnic->rx_mask = mask;
9593
9594 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
9595 bnxt_queue_sp_work(bp);
9596 }
9597 }
9598
9599 static int bnxt_cfg_rx_mode(struct bnxt *bp)
9600 {
9601 struct net_device *dev = bp->dev;
9602 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9603 struct netdev_hw_addr *ha;
9604 int i, off = 0, rc;
9605 bool uc_update;
9606
9607 netif_addr_lock_bh(dev);
9608 uc_update = bnxt_uc_list_updated(bp);
9609 netif_addr_unlock_bh(dev);
9610
9611 if (!uc_update)
9612 goto skip_uc;
9613
9614 mutex_lock(&bp->hwrm_cmd_lock);
9615 for (i = 1; i < vnic->uc_filter_count; i++) {
9616 struct hwrm_cfa_l2_filter_free_input req = {0};
9617
9618 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
9619 -1);
9620
9621 req.l2_filter_id = vnic->fw_l2_filter_id[i];
9622
9623 rc = _hwrm_send_message(bp, &req, sizeof(req),
9624 HWRM_CMD_TIMEOUT);
9625 }
9626 mutex_unlock(&bp->hwrm_cmd_lock);
9627
9628 vnic->uc_filter_count = 1;
9629
9630 netif_addr_lock_bh(dev);
9631 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
9632 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9633 } else {
9634 netdev_for_each_uc_addr(ha, dev) {
9635 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
9636 off += ETH_ALEN;
9637 vnic->uc_filter_count++;
9638 }
9639 }
9640 netif_addr_unlock_bh(dev);
9641
9642 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
9643 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
9644 if (rc) {
9645 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
9646 rc);
9647 vnic->uc_filter_count = i;
9648 return rc;
9649 }
9650 }
9651
9652 skip_uc:
9653 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9654 if (rc && vnic->mc_list_count) {
9655 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
9656 rc);
9657 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9658 vnic->mc_list_count = 0;
9659 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9660 }
9661 if (rc)
9662 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
9663 rc);
9664
9665 return rc;
9666 }
9667
9668 static bool bnxt_can_reserve_rings(struct bnxt *bp)
9669 {
9670 #ifdef CONFIG_BNXT_SRIOV
9671 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
9672 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9673
9674 /* No minimum rings were provisioned by the PF. Don't
9675 * reserve rings by default when device is down.
9676 */
9677 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
9678 return true;
9679
9680 if (!netif_running(bp->dev))
9681 return false;
9682 }
9683 #endif
9684 return true;
9685 }
9686
9687 /* If the chip and firmware support RFS */
9688 static bool bnxt_rfs_supported(struct bnxt *bp)
9689 {
9690 if (bp->flags & BNXT_FLAG_CHIP_P5) {
9691 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
9692 return true;
9693 return false;
9694 }
9695 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
9696 return true;
9697 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9698 return true;
9699 return false;
9700 }
9701
9702 /* If runtime conditions support RFS */
9703 static bool bnxt_rfs_capable(struct bnxt *bp)
9704 {
9705 #ifdef CONFIG_RFS_ACCEL
9706 int vnics, max_vnics, max_rss_ctxs;
9707
9708 if (bp->flags & BNXT_FLAG_CHIP_P5)
9709 return bnxt_rfs_supported(bp);
9710 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
9711 return false;
9712
9713 vnics = 1 + bp->rx_nr_rings;
9714 max_vnics = bnxt_get_max_func_vnics(bp);
9715 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
9716
9717 /* RSS contexts not a limiting factor */
9718 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9719 max_rss_ctxs = max_vnics;
9720 if (vnics > max_vnics || vnics > max_rss_ctxs) {
9721 if (bp->rx_nr_rings > 1)
9722 netdev_warn(bp->dev,
9723 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9724 min(max_rss_ctxs - 1, max_vnics - 1));
9725 return false;
9726 }
9727
9728 if (!BNXT_NEW_RM(bp))
9729 return true;
9730
9731 if (vnics == bp->hw_resc.resv_vnics)
9732 return true;
9733
9734 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
9735 if (vnics <= bp->hw_resc.resv_vnics)
9736 return true;
9737
9738 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
9739 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
9740 return false;
9741 #else
9742 return false;
9743 #endif
9744 }
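/* Worked example for the VNIC accounting above (numbers hypothetical):
 * with 8 RX rings, aRFS needs 1 default VNIC + 8 per-ring VNICs = 9.
 * If the function only has max_vnics = 8 (or as few RSS contexts), the
 * check fails and the warning suggests at most
 * min(max_rss_ctxs - 1, max_vnics - 1) = 7 RX rings.
 */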
9745
9746 static netdev_features_t bnxt_fix_features(struct net_device *dev,
9747 netdev_features_t features)
9748 {
9749 struct bnxt *bp = netdev_priv(dev);
9750
9751 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
9752 features &= ~NETIF_F_NTUPLE;
9753
9754 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9755 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9756
9757 if (!(features & NETIF_F_GRO))
9758 features &= ~NETIF_F_GRO_HW;
9759
9760 if (features & NETIF_F_GRO_HW)
9761 features &= ~NETIF_F_LRO;
9762
9763 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
9764 * turned on or off together.
9765 */
9766 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
9767 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
9768 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
9769 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9770 NETIF_F_HW_VLAN_STAG_RX);
9771 else
9772 features |= NETIF_F_HW_VLAN_CTAG_RX |
9773 NETIF_F_HW_VLAN_STAG_RX;
9774 }
9775 #ifdef CONFIG_BNXT_SRIOV
9776 if (BNXT_VF(bp)) {
9777 if (bp->vf.vlan) {
9778 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9779 NETIF_F_HW_VLAN_STAG_RX);
9780 }
9781 }
9782 #endif
9783 return features;
9784 }
9785
9786 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
9787 {
9788 struct bnxt *bp = netdev_priv(dev);
9789 u32 flags = bp->flags;
9790 u32 changes;
9791 int rc = 0;
9792 bool re_init = false;
9793 bool update_tpa = false;
9794
9795 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
9796 if (features & NETIF_F_GRO_HW)
9797 flags |= BNXT_FLAG_GRO;
9798 else if (features & NETIF_F_LRO)
9799 flags |= BNXT_FLAG_LRO;
9800
9801 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9802 flags &= ~BNXT_FLAG_TPA;
9803
9804 if (features & NETIF_F_HW_VLAN_CTAG_RX)
9805 flags |= BNXT_FLAG_STRIP_VLAN;
9806
9807 if (features & NETIF_F_NTUPLE)
9808 flags |= BNXT_FLAG_RFS;
9809
9810 changes = flags ^ bp->flags;
9811 if (changes & BNXT_FLAG_TPA) {
9812 update_tpa = true;
9813 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
9814 (flags & BNXT_FLAG_TPA) == 0 ||
9815 (bp->flags & BNXT_FLAG_CHIP_P5))
9816 re_init = true;
9817 }
9818
9819 if (changes & ~BNXT_FLAG_TPA)
9820 re_init = true;
9821
9822 if (flags != bp->flags) {
9823 u32 old_flags = bp->flags;
9824
9825 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9826 bp->flags = flags;
9827 if (update_tpa)
9828 bnxt_set_ring_params(bp);
9829 return rc;
9830 }
9831
9832 if (re_init) {
9833 bnxt_close_nic(bp, false, false);
9834 bp->flags = flags;
9835 if (update_tpa)
9836 bnxt_set_ring_params(bp);
9837
9838 return bnxt_open_nic(bp, false, false);
9839 }
9840 if (update_tpa) {
9841 bp->flags = flags;
9842 			rc = bnxt_set_tpa(bp, !!(flags & BNXT_FLAG_TPA));
9845 if (rc)
9846 bp->flags = old_flags;
9847 }
9848 }
9849 return rc;
9850 }
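/* The XOR in bnxt_set_features() yields exactly the toggled flag bits.
 * Hypothetical example: old flags 0b0101, new flags 0b0110 gives
 * changes = 0b0011, i.e. bits 0 and 1 both flipped.  Any changed bit
 * outside BNXT_FLAG_TPA forces a full close/open cycle; only a TPA mode
 * change where TPA stays enabled on a pre-P5 chip (e.g. HW-GRO <-> LRO)
 * can be applied with bnxt_set_tpa() alone.
 */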
9851
9852 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9853 u32 ring_id, u32 *prod, u32 *cons)
9854 {
9855 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9856 struct hwrm_dbg_ring_info_get_input req = {0};
9857 int rc;
9858
9859 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9860 req.ring_type = ring_type;
9861 req.fw_ring_id = cpu_to_le32(ring_id);
9862 mutex_lock(&bp->hwrm_cmd_lock);
9863 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9864 if (!rc) {
9865 *prod = le32_to_cpu(resp->producer_index);
9866 *cons = le32_to_cpu(resp->consumer_index);
9867 }
9868 mutex_unlock(&bp->hwrm_cmd_lock);
9869 return rc;
9870 }
9871
9872 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9873 {
9874 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9875 int i = bnapi->index;
9876
9877 if (!txr)
9878 return;
9879
9880 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9881 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9882 txr->tx_cons);
9883 }
9884
9885 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9886 {
9887 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9888 int i = bnapi->index;
9889
9890 if (!rxr)
9891 return;
9892
9893 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9894 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9895 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9896 rxr->rx_sw_agg_prod);
9897 }
9898
9899 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9900 {
9901 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9902 int i = bnapi->index;
9903
9904 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9905 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9906 }
9907
9908 static void bnxt_dbg_dump_states(struct bnxt *bp)
9909 {
9910 int i;
9911 struct bnxt_napi *bnapi;
9912
9913 for (i = 0; i < bp->cp_nr_rings; i++) {
9914 bnapi = bp->bnapi[i];
9915 if (netif_msg_drv(bp)) {
9916 bnxt_dump_tx_sw_state(bnapi);
9917 bnxt_dump_rx_sw_state(bnapi);
9918 bnxt_dump_cp_sw_state(bnapi);
9919 }
9920 }
9921 }
9922
9923 static void bnxt_reset_task(struct bnxt *bp, bool silent)
9924 {
9925 if (!silent)
9926 bnxt_dbg_dump_states(bp);
9927 if (netif_running(bp->dev)) {
9928 int rc;
9929
9930 if (!silent)
9931 bnxt_ulp_stop(bp);
9932 bnxt_close_nic(bp, false, false);
9933 rc = bnxt_open_nic(bp, false, false);
9934 if (!silent && !rc)
9935 bnxt_ulp_start(bp);
9936 }
9937 }
9938
9939 static void bnxt_tx_timeout(struct net_device *dev)
9940 {
9941 struct bnxt *bp = netdev_priv(dev);
9942
9943 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9944 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
9945 bnxt_queue_sp_work(bp);
9946 }
9947
9948 static void bnxt_fw_health_check(struct bnxt *bp)
9949 {
9950 struct bnxt_fw_health *fw_health = bp->fw_health;
9951 u32 val;
9952
9953 if (!fw_health || !fw_health->enabled ||
9954 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9955 return;
9956
9957 if (fw_health->tmr_counter) {
9958 fw_health->tmr_counter--;
9959 return;
9960 }
9961
9962 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
9963 if (val == fw_health->last_fw_heartbeat)
9964 goto fw_reset;
9965
9966 fw_health->last_fw_heartbeat = val;
9967
9968 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
9969 if (val != fw_health->last_fw_reset_cnt)
9970 goto fw_reset;
9971
9972 fw_health->tmr_counter = fw_health->tmr_multiplier;
9973 return;
9974
9975 fw_reset:
9976 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
9977 bnxt_queue_sp_work(bp);
9978 }
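/* bnxt_fw_health_check() is driven from bnxt_timer(), so tmr_counter
 * turns the periodic timer into a slower sampling interval.  A simplified
 * model of the down-counter (illustrative only; the real code above
 * re-arms the counter only after both health registers check out):
 */
static inline bool bnxt_example_poll_due(u32 *counter, u32 multiplier)
{
	if (*counter) {
		(*counter)--;		/* not due yet, skip this tick */
		return false;
	}
	*counter = multiplier;		/* re-arm for the next window */
	return true;			/* sample heartbeat/reset-count now */
}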
9979
9980 static void bnxt_timer(struct timer_list *t)
9981 {
9982 struct bnxt *bp = from_timer(bp, t, timer);
9983 struct net_device *dev = bp->dev;
9984
9985 if (!netif_running(dev))
9986 return;
9987
9988 if (atomic_read(&bp->intr_sem) != 0)
9989 goto bnxt_restart_timer;
9990
9991 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
9992 bnxt_fw_health_check(bp);
9993
9994 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
9995 bp->stats_coal_ticks) {
9996 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
9997 bnxt_queue_sp_work(bp);
9998 }
9999
10000 if (bnxt_tc_flower_enabled(bp)) {
10001 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10002 bnxt_queue_sp_work(bp);
10003 }
10004
10005 if (bp->link_info.phy_retry) {
10006 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
10007 bp->link_info.phy_retry = false;
10008 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10009 } else {
10010 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10011 bnxt_queue_sp_work(bp);
10012 }
10013 }
10014
10015 if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
10016 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10017 bnxt_queue_sp_work(bp);
10018 }
10019 bnxt_restart_timer:
10020 mod_timer(&bp->timer, jiffies + bp->current_interval);
10021 }
10022
10023 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
10024 {
10025 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10026 * set. If the device is being closed, bnxt_close() may be holding
10027 * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
10028 * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
10029 */
10030 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10031 rtnl_lock();
10032 }
10033
10034 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10035 {
10036 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10037 rtnl_unlock();
10038 }
10039
10040 /* Only called from bnxt_sp_task() */
10041 static void bnxt_reset(struct bnxt *bp, bool silent)
10042 {
10043 bnxt_rtnl_lock_sp(bp);
10044 if (test_bit(BNXT_STATE_OPEN, &bp->state))
10045 bnxt_reset_task(bp, silent);
10046 bnxt_rtnl_unlock_sp(bp);
10047 }
10048
10049 static void bnxt_fw_reset_close(struct bnxt *bp)
10050 {
10051 __bnxt_close_nic(bp, true, false);
10052 bnxt_ulp_irq_stop(bp);
10053 bnxt_clear_int_mode(bp);
10054 bnxt_hwrm_func_drv_unrgtr(bp);
10055 bnxt_free_ctx_mem(bp);
10056 kfree(bp->ctx);
10057 bp->ctx = NULL;
10058 }
10059
10060 static bool is_bnxt_fw_ok(struct bnxt *bp)
10061 {
10062 struct bnxt_fw_health *fw_health = bp->fw_health;
10063 bool no_heartbeat = false, has_reset = false;
10064 u32 val;
10065
10066 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10067 if (val == fw_health->last_fw_heartbeat)
10068 no_heartbeat = true;
10069
10070 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10071 if (val != fw_health->last_fw_reset_cnt)
10072 has_reset = true;
10073
10074 if (!no_heartbeat && has_reset)
10075 return true;
10076
10077 return false;
10078 }
10079
10080 /* rtnl_lock is acquired before calling this function */
10081 static void bnxt_force_fw_reset(struct bnxt *bp)
10082 {
10083 struct bnxt_fw_health *fw_health = bp->fw_health;
10084 u32 wait_dsecs;
10085
10086 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
10087 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10088 return;
10089
10090 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10091 bnxt_fw_reset_close(bp);
10092 wait_dsecs = fw_health->master_func_wait_dsecs;
10093 if (fw_health->master) {
10094 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
10095 wait_dsecs = 0;
10096 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10097 } else {
10098 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
10099 wait_dsecs = fw_health->normal_func_wait_dsecs;
10100 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10101 }
10102
10103 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
10104 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
10105 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10106 }
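/* Firmware wait times are carried in deciseconds, so wait_dsecs * HZ / 10
 * converts them to jiffies for the delayed work above.  Example: with
 * HZ = 250, a 30 dsec (3 second) wait becomes 30 * 250 / 10 = 750 jiffies.
 */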
10107
10108 void bnxt_fw_exception(struct bnxt *bp)
10109 {
10110 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10111 bnxt_rtnl_lock_sp(bp);
10112 bnxt_force_fw_reset(bp);
10113 bnxt_rtnl_unlock_sp(bp);
10114 }
10115
10116 /* Returns the number of registered VFs, 1 if VF configuration is still
10117 * pending, 0 if there are no VFs, or < 0 on error.
10118 */
10119 static int bnxt_get_registered_vfs(struct bnxt *bp)
10120 {
10121 #ifdef CONFIG_BNXT_SRIOV
10122 int rc;
10123
10124 if (!BNXT_PF(bp))
10125 return 0;
10126
10127 rc = bnxt_hwrm_func_qcfg(bp);
10128 if (rc) {
10129 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
10130 return rc;
10131 }
10132 if (bp->pf.registered_vfs)
10133 return bp->pf.registered_vfs;
10134 if (bp->sriov_cfg)
10135 return 1;
10136 #endif
10137 return 0;
10138 }
10139
10140 void bnxt_fw_reset(struct bnxt *bp)
10141 {
10142 bnxt_rtnl_lock_sp(bp);
10143 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
10144 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10145 int n = 0, tmo;
10146
10147 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10148 if (bp->pf.active_vfs &&
10149 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10150 n = bnxt_get_registered_vfs(bp);
10151 if (n < 0) {
10152 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
10153 n);
10154 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10155 dev_close(bp->dev);
10156 goto fw_reset_exit;
10157 } else if (n > 0) {
10158 u16 vf_tmo_dsecs = n * 10;
10159
10160 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
10161 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
10162 bp->fw_reset_state =
10163 BNXT_FW_RESET_STATE_POLL_VF;
10164 bnxt_queue_fw_reset_work(bp, HZ / 10);
10165 goto fw_reset_exit;
10166 }
10167 bnxt_fw_reset_close(bp);
10168 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10169 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10170 tmo = HZ / 10;
10171 } else {
10172 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10173 tmo = bp->fw_reset_min_dsecs * HZ / 10;
10174 }
10175 bnxt_queue_fw_reset_work(bp, tmo);
10176 }
10177 fw_reset_exit:
10178 bnxt_rtnl_unlock_sp(bp);
10179 }
10180
10181 static void bnxt_chk_missed_irq(struct bnxt *bp)
10182 {
10183 int i;
10184
10185 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10186 return;
10187
10188 for (i = 0; i < bp->cp_nr_rings; i++) {
10189 struct bnxt_napi *bnapi = bp->bnapi[i];
10190 struct bnxt_cp_ring_info *cpr;
10191 u32 fw_ring_id;
10192 int j;
10193
10194 if (!bnapi)
10195 continue;
10196
10197 cpr = &bnapi->cp_ring;
10198 for (j = 0; j < 2; j++) {
10199 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
10200 u32 val[2];
10201
10202 if (!cpr2 || cpr2->has_more_work ||
10203 !bnxt_has_work(bp, cpr2))
10204 continue;
10205
10206 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
10207 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
10208 continue;
10209 }
10210 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
10211 bnxt_dbg_hwrm_ring_info_get(bp,
10212 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
10213 fw_ring_id, &val[0], &val[1]);
10214 cpr->missed_irqs++;
10215 }
10216 }
10217 }
10218
10219 static void bnxt_cfg_ntp_filters(struct bnxt *);
10220
10221 static void bnxt_sp_task(struct work_struct *work)
10222 {
10223 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
10224
10225 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10226 smp_mb__after_atomic();
10227 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10228 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10229 return;
10230 }
10231
10232 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
10233 bnxt_cfg_rx_mode(bp);
10234
10235 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
10236 bnxt_cfg_ntp_filters(bp);
10237 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
10238 bnxt_hwrm_exec_fwd_req(bp);
10239 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10240 bnxt_hwrm_tunnel_dst_port_alloc(
10241 bp, bp->vxlan_port,
10242 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10243 }
10244 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10245 bnxt_hwrm_tunnel_dst_port_free(
10246 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10247 }
10248 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10249 bnxt_hwrm_tunnel_dst_port_alloc(
10250 bp, bp->nge_port,
10251 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10252 }
10253 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10254 bnxt_hwrm_tunnel_dst_port_free(
10255 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10256 }
10257 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
10258 bnxt_hwrm_port_qstats(bp);
10259 bnxt_hwrm_port_qstats_ext(bp);
10260 bnxt_hwrm_pcie_qstats(bp);
10261 }
10262
10263 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
10264 int rc;
10265
10266 mutex_lock(&bp->link_lock);
10267 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
10268 &bp->sp_event))
10269 bnxt_hwrm_phy_qcaps(bp);
10270
10271 rc = bnxt_update_link(bp, true);
10272 mutex_unlock(&bp->link_lock);
10273 if (rc)
10274 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
10275 rc);
10276 }
10277 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
10278 int rc;
10279
10280 mutex_lock(&bp->link_lock);
10281 rc = bnxt_update_phy_setting(bp);
10282 mutex_unlock(&bp->link_lock);
10283 if (rc) {
10284 netdev_warn(bp->dev, "update phy settings retry failed\n");
10285 } else {
10286 bp->link_info.phy_retry = false;
10287 netdev_info(bp->dev, "update phy settings retry succeeded\n");
10288 }
10289 }
10290 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
10291 mutex_lock(&bp->link_lock);
10292 bnxt_get_port_module_status(bp);
10293 mutex_unlock(&bp->link_lock);
10294 }
10295
10296 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
10297 bnxt_tc_flow_stats_work(bp);
10298
10299 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
10300 bnxt_chk_missed_irq(bp);
10301
10302 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
10303 * must be the last functions to be called before exiting.
10304 */
10305 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
10306 bnxt_reset(bp, false);
10307
10308 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
10309 bnxt_reset(bp, true);
10310
10311 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
10312 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
10313
10314 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
10315 if (!is_bnxt_fw_ok(bp))
10316 bnxt_devlink_health_report(bp,
10317 BNXT_FW_EXCEPTION_SP_EVENT);
10318 }
10319
10320 smp_mb__before_atomic();
10321 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10322 }
10323
10324 /* Under rtnl_lock */
10325 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
10326 int tx_xdp)
10327 {
10328 int max_rx, max_tx, tx_sets = 1;
10329 int tx_rings_needed, stats;
10330 int rx_rings = rx;
10331 int cp, vnics, rc;
10332
10333 if (tcs)
10334 tx_sets = tcs;
10335
10336 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
10337 if (rc)
10338 return rc;
10339
10340 if (max_rx < rx)
10341 return -ENOMEM;
10342
10343 tx_rings_needed = tx * tx_sets + tx_xdp;
10344 if (max_tx < tx_rings_needed)
10345 return -ENOMEM;
10346
10347 vnics = 1;
10348 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
10349 vnics += rx_rings;
10350
10351 if (bp->flags & BNXT_FLAG_AGG_RINGS)
10352 rx_rings <<= 1;
10353 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
10354 stats = cp;
10355 if (BNXT_NEW_RM(bp)) {
10356 cp += bnxt_get_ulp_msix_num(bp);
10357 stats += bnxt_get_ulp_stat_ctxs(bp);
10358 }
10359 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
10360 stats, vnics);
10361 }
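/* Worked example for the ring math above (hypothetical numbers):
 * tx = 4, rx = 4, tcs = 2, tx_xdp = 0 gives tx_rings_needed = 4 * 2 = 8.
 * With RFS on a pre-P5 chip, vnics = 1 + 4 = 5; with aggregation rings,
 * rx_rings doubles to 8; and with shared rings, cp = max(8, 4) = 8
 * completion rings before any ULP MSI-X and stat contexts are added.
 */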
10362
10363 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
10364 {
10365 if (bp->bar2) {
10366 pci_iounmap(pdev, bp->bar2);
10367 bp->bar2 = NULL;
10368 }
10369
10370 if (bp->bar1) {
10371 pci_iounmap(pdev, bp->bar1);
10372 bp->bar1 = NULL;
10373 }
10374
10375 if (bp->bar0) {
10376 pci_iounmap(pdev, bp->bar0);
10377 bp->bar0 = NULL;
10378 }
10379 }
10380
10381 static void bnxt_cleanup_pci(struct bnxt *bp)
10382 {
10383 bnxt_unmap_bars(bp, bp->pdev);
10384 pci_release_regions(bp->pdev);
10385 pci_disable_device(bp->pdev);
10386 }
10387
10388 static void bnxt_init_dflt_coal(struct bnxt *bp)
10389 {
10390 struct bnxt_coal *coal;
10391
10392 /* Tick values in microseconds.
10393 * 1 coal_buf x bufs_per_record = 1 completion record.
10394 */
10395 coal = &bp->rx_coal;
10396 coal->coal_ticks = 10;
10397 coal->coal_bufs = 30;
10398 coal->coal_ticks_irq = 1;
10399 coal->coal_bufs_irq = 2;
10400 coal->idle_thresh = 50;
10401 coal->bufs_per_record = 2;
10402 coal->budget = 64; /* NAPI budget */
10403
10404 coal = &bp->tx_coal;
10405 coal->coal_ticks = 28;
10406 coal->coal_bufs = 30;
10407 coal->coal_ticks_irq = 2;
10408 coal->coal_bufs_irq = 2;
10409 coal->bufs_per_record = 1;
10410
10411 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
10412 }
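/* Worked example of the defaults above: on the RX side bufs_per_record
 * is 2 and coal_bufs is 30, so the NIC coalesces roughly 30 / 2 = 15
 * completion records, or waits at most coal_ticks = 10 usec, whichever
 * comes first, before raising an interrupt.
 */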
10413
10414 static int bnxt_fw_init_one_p1(struct bnxt *bp)
10415 {
10416 int rc;
10417
10418 bp->fw_cap = 0;
10419 rc = bnxt_hwrm_ver_get(bp);
10420 if (rc)
10421 return rc;
10422
10423 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10424 rc = bnxt_alloc_kong_hwrm_resources(bp);
10425 if (rc)
10426 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10427 }
10428
10429 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10430 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10431 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10432 if (rc)
10433 return rc;
10434 }
10435 rc = bnxt_hwrm_func_reset(bp);
10436 if (rc)
10437 return -ENODEV;
10438
10439 bnxt_hwrm_fw_set_time(bp);
10440 return 0;
10441 }
10442
10443 static int bnxt_fw_init_one_p2(struct bnxt *bp)
10444 {
10445 int rc;
10446
10447 /* Get the MAX capabilities for this function */
10448 rc = bnxt_hwrm_func_qcaps(bp);
10449 if (rc) {
10450 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10451 rc);
10452 return -ENODEV;
10453 }
10454
10455 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
10456 if (rc)
10457 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
10458 rc);
10459
10460 rc = bnxt_hwrm_error_recovery_qcfg(bp);
10461 if (rc)
10462 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
10463 rc);
10464
10465 rc = bnxt_hwrm_func_drv_rgtr(bp);
10466 if (rc)
10467 return -ENODEV;
10468
10469 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
10470 if (rc)
10471 return -ENODEV;
10472
10473 bnxt_hwrm_func_qcfg(bp);
10474 bnxt_hwrm_vnic_qcaps(bp);
10475 bnxt_hwrm_port_led_qcaps(bp);
10476 bnxt_ethtool_init(bp);
10477 bnxt_dcb_init(bp);
10478 return 0;
10479 }
10480
10481 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
10482 {
10483 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
10484 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10485 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10486 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10487 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
10488 if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
10489 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10490 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10491 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10492 }
10493 }
10494
10495 static void bnxt_set_dflt_rfs(struct bnxt *bp)
10496 {
10497 struct net_device *dev = bp->dev;
10498
10499 dev->hw_features &= ~NETIF_F_NTUPLE;
10500 dev->features &= ~NETIF_F_NTUPLE;
10501 bp->flags &= ~BNXT_FLAG_RFS;
10502 if (bnxt_rfs_supported(bp)) {
10503 dev->hw_features |= NETIF_F_NTUPLE;
10504 if (bnxt_rfs_capable(bp)) {
10505 bp->flags |= BNXT_FLAG_RFS;
10506 dev->features |= NETIF_F_NTUPLE;
10507 }
10508 }
10509 }
10510
10511 static void bnxt_fw_init_one_p3(struct bnxt *bp)
10512 {
10513 struct pci_dev *pdev = bp->pdev;
10514
10515 bnxt_set_dflt_rss_hash_type(bp);
10516 bnxt_set_dflt_rfs(bp);
10517
10518 bnxt_get_wol_settings(bp);
10519 if (bp->flags & BNXT_FLAG_WOL_CAP)
10520 device_set_wakeup_enable(&pdev->dev, bp->wol);
10521 else
10522 device_set_wakeup_capable(&pdev->dev, false);
10523
10524 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10525 bnxt_hwrm_coal_params_qcaps(bp);
10526 }
10527
10528 static int bnxt_fw_init_one(struct bnxt *bp)
10529 {
10530 int rc;
10531
10532 rc = bnxt_fw_init_one_p1(bp);
10533 if (rc) {
10534 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
10535 return rc;
10536 }
10537 rc = bnxt_fw_init_one_p2(bp);
10538 if (rc) {
10539 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
10540 return rc;
10541 }
10542 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
10543 if (rc)
10544 return rc;
10545 bnxt_fw_init_one_p3(bp);
10546 return 0;
10547 }
10548
10549 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
10550 {
10551 struct bnxt_fw_health *fw_health = bp->fw_health;
10552 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
10553 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
10554 u32 reg_type, reg_off, delay_msecs;
10555
10556 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
10557 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
10558 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
10559 switch (reg_type) {
10560 case BNXT_FW_HEALTH_REG_TYPE_CFG:
10561 pci_write_config_dword(bp->pdev, reg_off, val);
10562 break;
10563 case BNXT_FW_HEALTH_REG_TYPE_GRC:
10564 writel(reg_off & BNXT_GRC_BASE_MASK,
10565 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
10566 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
10567 /* fall through */
10568 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
10569 writel(val, bp->bar0 + reg_off);
10570 break;
10571 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
10572 writel(val, bp->bar1 + reg_off);
10573 break;
10574 }
10575 if (delay_msecs) {
10576 pci_read_config_dword(bp->pdev, 0, &val);
10577 msleep(delay_msecs);
10578 }
10579 }
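/* GRC register writes above appear to go through a movable window: the
 * window base register in BAR0 is pointed at the masked (window-aligned)
 * GRC address, and the value is then written at the offset within the
 * window plus 0x2000, which is where that window is mapped in BAR0,
 * before falling through to the plain BAR0 write path.
 */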
10580
10581 static void bnxt_reset_all(struct bnxt *bp)
10582 {
10583 struct bnxt_fw_health *fw_health = bp->fw_health;
10584 int i, rc;
10585
10586 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10587 #ifdef CONFIG_TEE_BNXT_FW
10588 rc = tee_bnxt_fw_load();
10589 if (rc)
10590 netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
10591 bp->fw_reset_timestamp = jiffies;
10592 #endif
10593 return;
10594 }
10595
10596 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
10597 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
10598 bnxt_fw_reset_writel(bp, i);
10599 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
10600 struct hwrm_fw_reset_input req = {0};
10601
10602 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
10603 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
10604 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
10605 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
10606 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
10607 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10608 if (rc)
10609 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
10610 }
10611 bp->fw_reset_timestamp = jiffies;
10612 }
10613
10614 static void bnxt_fw_reset_task(struct work_struct *work)
10615 {
10616 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
10617 int rc;
10618
10619 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10620 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
10621 return;
10622 }
10623
10624 switch (bp->fw_reset_state) {
10625 case BNXT_FW_RESET_STATE_POLL_VF: {
10626 int n = bnxt_get_registered_vfs(bp);
10627 int tmo;
10628
10629 if (n < 0) {
10630 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
10631 n, jiffies_to_msecs(jiffies -
10632 bp->fw_reset_timestamp));
10633 goto fw_reset_abort;
10634 } else if (n > 0) {
10635 if (time_after(jiffies, bp->fw_reset_timestamp +
10636 (bp->fw_reset_max_dsecs * HZ / 10))) {
10637 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10638 bp->fw_reset_state = 0;
10639 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
10640 n);
10641 return;
10642 }
10643 bnxt_queue_fw_reset_work(bp, HZ / 10);
10644 return;
10645 }
10646 bp->fw_reset_timestamp = jiffies;
10647 rtnl_lock();
10648 bnxt_fw_reset_close(bp);
10649 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10650 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10651 tmo = HZ / 10;
10652 } else {
10653 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10654 tmo = bp->fw_reset_min_dsecs * HZ / 10;
10655 }
10656 rtnl_unlock();
10657 bnxt_queue_fw_reset_work(bp, tmo);
10658 return;
10659 }
10660 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
10661 u32 val;
10662
10663 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
10664 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
10665 !time_after(jiffies, bp->fw_reset_timestamp +
10666 (bp->fw_reset_max_dsecs * HZ / 10))) {
10667 bnxt_queue_fw_reset_work(bp, HZ / 5);
10668 return;
10669 }
10670
10671 if (!bp->fw_health->master) {
10672 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
10673
10674 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10675 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10676 return;
10677 }
10678 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10679 }
10680 /* fall through */
10681 case BNXT_FW_RESET_STATE_RESET_FW: {
10682 u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs;
10683
10684 bnxt_reset_all(bp);
10685 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10686 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10687 return;
10688 }
10689 case BNXT_FW_RESET_STATE_ENABLE_DEV:
10690 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
10691 bp->fw_health) {
10692 u32 val;
10693
10694 val = bnxt_fw_health_readl(bp,
10695 BNXT_FW_RESET_INPROG_REG);
10696 if (val)
10697 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
10698 val);
10699 }
10700 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10701 if (pci_enable_device(bp->pdev)) {
10702 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
10703 goto fw_reset_abort;
10704 }
10705 pci_set_master(bp->pdev);
10706 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
10707 /* fall through */
10708 case BNXT_FW_RESET_STATE_POLL_FW:
10709 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
10710 rc = __bnxt_hwrm_ver_get(bp, true);
10711 if (rc) {
10712 if (time_after(jiffies, bp->fw_reset_timestamp +
10713 (bp->fw_reset_max_dsecs * HZ / 10))) {
10714 netdev_err(bp->dev, "Firmware reset aborted\n");
10715 goto fw_reset_abort;
10716 }
10717 bnxt_queue_fw_reset_work(bp, HZ / 5);
10718 return;
10719 }
10720 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10721 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
10722 /* fall through */
10723 case BNXT_FW_RESET_STATE_OPENING:
10724 while (!rtnl_trylock()) {
10725 bnxt_queue_fw_reset_work(bp, HZ / 10);
10726 return;
10727 }
10728 rc = bnxt_open(bp->dev);
10729 if (rc) {
10730 netdev_err(bp->dev, "bnxt_open_nic() failed\n");
10731 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10732 dev_close(bp->dev);
10733 }
10734 bnxt_ulp_irq_restart(bp, rc);
10735 rtnl_unlock();
10736
10737 bp->fw_reset_state = 0;
10738 /* Make sure fw_reset_state is 0 before clearing the flag */
10739 smp_mb__before_atomic();
10740 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10741 break;
10742 }
10743 return;
10744
10745 fw_reset_abort:
10746 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10747 bp->fw_reset_state = 0;
10748 rtnl_lock();
10749 dev_close(bp->dev);
10750 rtnl_unlock();
10751 }
10752
10753 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
10754 {
10755 int rc;
10756 struct bnxt *bp = netdev_priv(dev);
10757
10758 SET_NETDEV_DEV(dev, &pdev->dev);
10759
10760 /* enable device (incl. PCI PM wakeup), and bus-mastering */
10761 rc = pci_enable_device(pdev);
10762 if (rc) {
10763 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
10764 goto init_err;
10765 }
10766
10767 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10768 dev_err(&pdev->dev,
10769 "Cannot find PCI device base address, aborting\n");
10770 rc = -ENODEV;
10771 goto init_err_disable;
10772 }
10773
10774 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10775 if (rc) {
10776 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
10777 goto init_err_disable;
10778 }
10779
10780 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
10781 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
10782 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;
10783 		goto init_err_disable;
10784 }
10785
10786 pci_set_master(pdev);
10787
10788 bp->dev = dev;
10789 bp->pdev = pdev;
10790
10791 bp->bar0 = pci_ioremap_bar(pdev, 0);
10792 if (!bp->bar0) {
10793 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
10794 rc = -ENOMEM;
10795 goto init_err_release;
10796 }
10797
10798 bp->bar1 = pci_ioremap_bar(pdev, 2);
10799 if (!bp->bar1) {
10800 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
10801 rc = -ENOMEM;
10802 goto init_err_release;
10803 }
10804
10805 bp->bar2 = pci_ioremap_bar(pdev, 4);
10806 if (!bp->bar2) {
10807 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
10808 rc = -ENOMEM;
10809 goto init_err_release;
10810 }
10811
10812 pci_enable_pcie_error_reporting(pdev);
10813
10814 INIT_WORK(&bp->sp_task, bnxt_sp_task);
10815 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
10816
10817 spin_lock_init(&bp->ntp_fltr_lock);
10818 #if BITS_PER_LONG == 32
10819 spin_lock_init(&bp->db_lock);
10820 #endif
10821
10822 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
10823 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
10824
10825 bnxt_init_dflt_coal(bp);
10826
10827 timer_setup(&bp->timer, bnxt_timer, 0);
10828 bp->current_interval = BNXT_TIMER_INTERVAL;
10829
10830 clear_bit(BNXT_STATE_OPEN, &bp->state);
10831 return 0;
10832
10833 init_err_release:
10834 bnxt_unmap_bars(bp, pdev);
10835 pci_release_regions(pdev);
10836
10837 init_err_disable:
10838 pci_disable_device(pdev);
10839
10840 init_err:
10841 return rc;
10842 }
10843
10844 /* rtnl_lock held */
10845 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
10846 {
10847 struct sockaddr *addr = p;
10848 struct bnxt *bp = netdev_priv(dev);
10849 int rc = 0;
10850
10851 if (!is_valid_ether_addr(addr->sa_data))
10852 return -EADDRNOTAVAIL;
10853
10854 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
10855 return 0;
10856
10857 rc = bnxt_approve_mac(bp, addr->sa_data, true);
10858 if (rc)
10859 return rc;
10860
10861 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10862 if (netif_running(dev)) {
10863 bnxt_close_nic(bp, false, false);
10864 rc = bnxt_open_nic(bp, false, false);
10865 }
10866
10867 return rc;
10868 }
10869
10870 /* rtnl_lock held */
10871 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
10872 {
10873 struct bnxt *bp = netdev_priv(dev);
10874
10875 if (netif_running(dev))
10876 bnxt_close_nic(bp, false, false);
10877
10878 dev->mtu = new_mtu;
10879 bnxt_set_ring_params(bp);
10880
10881 if (netif_running(dev))
10882 return bnxt_open_nic(bp, false, false);
10883
10884 return 0;
10885 }
10886
10887 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
10888 {
10889 struct bnxt *bp = netdev_priv(dev);
10890 bool sh = false;
10891 int rc;
10892
10893 if (tc > bp->max_tc) {
10894 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
10895 tc, bp->max_tc);
10896 return -EINVAL;
10897 }
10898
10899 if (netdev_get_num_tc(dev) == tc)
10900 return 0;
10901
10902 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10903 sh = true;
10904
10905 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
10906 sh, tc, bp->tx_nr_rings_xdp);
10907 if (rc)
10908 return rc;
10909
10910 /* Needs to close the device and do hw resource re-allocations */
10911 if (netif_running(bp->dev))
10912 bnxt_close_nic(bp, true, false);
10913
10914 if (tc) {
10915 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
10916 netdev_set_num_tc(dev, tc);
10917 } else {
10918 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10919 netdev_reset_tc(dev);
10920 }
10921 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
10922 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
10923 bp->tx_nr_rings + bp->rx_nr_rings;
10924
10925 if (netif_running(bp->dev))
10926 return bnxt_open_nic(bp, true, false);
10927
10928 return 0;
10929 }
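/* Example for the TC ring sizing above (hypothetical numbers): with
 * tx_nr_rings_per_tc = 4 and tc = 2, bp->tx_nr_rings becomes 8 plus any
 * XDP rings; with shared completion rings and rx_nr_rings = 4,
 * cp_nr_rings = max(8, 4) = 8, otherwise 8 + 4 = 12.
 */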
10930
10931 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
10932 void *cb_priv)
10933 {
10934 struct bnxt *bp = cb_priv;
10935
10936 if (!bnxt_tc_flower_enabled(bp) ||
10937 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
10938 return -EOPNOTSUPP;
10939
10940 switch (type) {
10941 case TC_SETUP_CLSFLOWER:
10942 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
10943 default:
10944 return -EOPNOTSUPP;
10945 }
10946 }
10947
10948 static LIST_HEAD(bnxt_block_cb_list);
10949
10950 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
10951 void *type_data)
10952 {
10953 struct bnxt *bp = netdev_priv(dev);
10954
10955 switch (type) {
10956 case TC_SETUP_BLOCK:
10957 return flow_block_cb_setup_simple(type_data,
10958 &bnxt_block_cb_list,
10959 bnxt_setup_tc_block_cb,
10960 bp, bp, true);
10961 case TC_SETUP_QDISC_MQPRIO: {
10962 struct tc_mqprio_qopt *mqprio = type_data;
10963
10964 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
10965
10966 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
10967 }
10968 default:
10969 return -EOPNOTSUPP;
10970 }
10971 }
10972
10973 #ifdef CONFIG_RFS_ACCEL
10974 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
10975 struct bnxt_ntuple_filter *f2)
10976 {
10977 struct flow_keys *keys1 = &f1->fkeys;
10978 struct flow_keys *keys2 = &f2->fkeys;
10979
10980 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
10981 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
10982 keys1->ports.ports == keys2->ports.ports &&
10983 keys1->basic.ip_proto == keys2->basic.ip_proto &&
10984 keys1->basic.n_proto == keys2->basic.n_proto &&
10985 keys1->control.flags == keys2->control.flags &&
10986 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
10987 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
10988 return true;
10989
10990 return false;
10991 }
10992
10993 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
10994 u16 rxq_index, u32 flow_id)
10995 {
10996 struct bnxt *bp = netdev_priv(dev);
10997 struct bnxt_ntuple_filter *fltr, *new_fltr;
10998 struct flow_keys *fkeys;
10999 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
11000 int rc = 0, idx, bit_id, l2_idx = 0;
11001 struct hlist_head *head;
11002
11003 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
11004 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11005 int off = 0, j;
11006
11007 netif_addr_lock_bh(dev);
11008 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
11009 if (ether_addr_equal(eth->h_dest,
11010 vnic->uc_list + off)) {
11011 l2_idx = j + 1;
11012 break;
11013 }
11014 }
11015 netif_addr_unlock_bh(dev);
11016 if (!l2_idx)
11017 return -EINVAL;
11018 }
11019 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
11020 if (!new_fltr)
11021 return -ENOMEM;
11022
11023 fkeys = &new_fltr->fkeys;
11024 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
11025 rc = -EPROTONOSUPPORT;
11026 goto err_free;
11027 }
11028
11029 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
11030 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
11031 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
11032 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
11033 rc = -EPROTONOSUPPORT;
11034 goto err_free;
11035 }
11036 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
11037 bp->hwrm_spec_code < 0x10601) {
11038 rc = -EPROTONOSUPPORT;
11039 goto err_free;
11040 }
11041 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
11042 bp->hwrm_spec_code < 0x10601) {
11043 rc = -EPROTONOSUPPORT;
11044 goto err_free;
11045 }
11046
11047 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
11048 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
11049
11050 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
11051 head = &bp->ntp_fltr_hash_tbl[idx];
11052 rcu_read_lock();
11053 hlist_for_each_entry_rcu(fltr, head, hash) {
11054 if (bnxt_fltr_match(fltr, new_fltr)) {
11055 rcu_read_unlock();
11056 rc = 0;
11057 goto err_free;
11058 }
11059 }
11060 rcu_read_unlock();
11061
11062 spin_lock_bh(&bp->ntp_fltr_lock);
11063 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
11064 BNXT_NTP_FLTR_MAX_FLTR, 0);
11065 if (bit_id < 0) {
11066 spin_unlock_bh(&bp->ntp_fltr_lock);
11067 rc = -ENOMEM;
11068 goto err_free;
11069 }
11070
11071 new_fltr->sw_id = (u16)bit_id;
11072 new_fltr->flow_id = flow_id;
11073 new_fltr->l2_fltr_idx = l2_idx;
11074 new_fltr->rxq = rxq_index;
11075 hlist_add_head_rcu(&new_fltr->hash, head);
11076 bp->ntp_fltr_count++;
11077 spin_unlock_bh(&bp->ntp_fltr_lock);
11078
11079 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11080 bnxt_queue_sp_work(bp);
11081
11082 return new_fltr->sw_id;
11083
11084 err_free:
11085 kfree(new_fltr);
11086 return rc;
11087 }
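/* The ntuple filter table is a plain hash: the skb's precomputed flow
 * hash, masked with BNXT_NTP_FLTR_HASH_MASK, picks a bucket, and
 * bnxt_fltr_match() resolves collisions by exact match.  A lookup helper
 * in that style (illustrative only; the driver open-codes this above):
 */
static inline struct bnxt_ntuple_filter *
bnxt_example_find_fltr(struct bnxt *bp, int idx,
		       struct bnxt_ntuple_filter *key)
{
	struct hlist_head *head = &bp->ntp_fltr_hash_tbl[idx];
	struct bnxt_ntuple_filter *fltr;

	/* caller must hold rcu_read_lock() */
	hlist_for_each_entry_rcu(fltr, head, hash) {
		if (bnxt_fltr_match(fltr, key))
			return fltr;
	}
	return NULL;
}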
11088
11089 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11090 {
11091 int i;
11092
11093 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
11094 struct hlist_head *head;
11095 struct hlist_node *tmp;
11096 struct bnxt_ntuple_filter *fltr;
11097 int rc;
11098
11099 head = &bp->ntp_fltr_hash_tbl[i];
11100 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
11101 bool del = false;
11102
11103 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
11104 if (rps_may_expire_flow(bp->dev, fltr->rxq,
11105 fltr->flow_id,
11106 fltr->sw_id)) {
11107 bnxt_hwrm_cfa_ntuple_filter_free(bp,
11108 fltr);
11109 del = true;
11110 }
11111 } else {
11112 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
11113 fltr);
11114 if (rc)
11115 del = true;
11116 else
11117 set_bit(BNXT_FLTR_VALID, &fltr->state);
11118 }
11119
11120 if (del) {
11121 spin_lock_bh(&bp->ntp_fltr_lock);
11122 hlist_del_rcu(&fltr->hash);
11123 bp->ntp_fltr_count--;
11124 spin_unlock_bh(&bp->ntp_fltr_lock);
11125 synchronize_rcu();
11126 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
11127 kfree(fltr);
11128 }
11129 }
11130 }
11131 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
11132 netdev_info(bp->dev, "Receive PF driver unload event!");
11133 }
11134
11135 #else
11136
11137 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11138 {
11139 }
11140
11141 #endif /* CONFIG_RFS_ACCEL */
11142
11143 static void bnxt_udp_tunnel_add(struct net_device *dev,
11144 struct udp_tunnel_info *ti)
11145 {
11146 struct bnxt *bp = netdev_priv(dev);
11147
11148 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
11149 return;
11150
11151 if (!netif_running(dev))
11152 return;
11153
11154 switch (ti->type) {
11155 case UDP_TUNNEL_TYPE_VXLAN:
11156 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
11157 return;
11158
11159 bp->vxlan_port_cnt++;
11160 if (bp->vxlan_port_cnt == 1) {
11161 bp->vxlan_port = ti->port;
11162 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
11163 bnxt_queue_sp_work(bp);
11164 }
11165 break;
11166 case UDP_TUNNEL_TYPE_GENEVE:
11167 if (bp->nge_port_cnt && bp->nge_port != ti->port)
11168 return;
11169
11170 bp->nge_port_cnt++;
11171 if (bp->nge_port_cnt == 1) {
11172 bp->nge_port = ti->port;
11173 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
11174 }
11175 break;
11176 default:
11177 return;
11178 }
11179
11180 bnxt_queue_sp_work(bp);
11181 }
11182
11183 static void bnxt_udp_tunnel_del(struct net_device *dev,
11184 struct udp_tunnel_info *ti)
11185 {
11186 struct bnxt *bp = netdev_priv(dev);
11187
11188 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
11189 return;
11190
11191 if (!netif_running(dev))
11192 return;
11193
11194 switch (ti->type) {
11195 case UDP_TUNNEL_TYPE_VXLAN:
11196 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
11197 return;
11198 bp->vxlan_port_cnt--;
11199
11200 if (bp->vxlan_port_cnt != 0)
11201 return;
11202
11203 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
11204 break;
11205 case UDP_TUNNEL_TYPE_GENEVE:
11206 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
11207 return;
11208 bp->nge_port_cnt--;
11209
11210 if (bp->nge_port_cnt != 0)
11211 return;
11212
11213 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
11214 break;
11215 default:
11216 return;
11217 }
11218
11219 bnxt_queue_sp_work(bp);
11220 }
11221
11222 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11223 struct net_device *dev, u32 filter_mask,
11224 int nlflags)
11225 {
11226 struct bnxt *bp = netdev_priv(dev);
11227
11228 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
11229 nlflags, filter_mask, NULL);
11230 }
11231
11232 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
11233 u16 flags, struct netlink_ext_ack *extack)
11234 {
11235 struct bnxt *bp = netdev_priv(dev);
11236 struct nlattr *attr, *br_spec;
11237 int rem, rc = 0;
11238
11239 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
11240 return -EOPNOTSUPP;
11241
11242 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11243 if (!br_spec)
11244 return -EINVAL;
11245
11246 nla_for_each_nested(attr, br_spec, rem) {
11247 u16 mode;
11248
11249 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11250 continue;
11251
11252 if (nla_len(attr) < sizeof(mode))
11253 return -EINVAL;
11254
11255 mode = nla_get_u16(attr);
11256 if (mode == bp->br_mode)
11257 break;
11258
11259 rc = bnxt_hwrm_set_br_mode(bp, mode);
11260 if (!rc)
11261 bp->br_mode = mode;
11262 break;
11263 }
11264 return rc;
11265 }
11266
11267 int bnxt_get_port_parent_id(struct net_device *dev,
11268 struct netdev_phys_item_id *ppid)
11269 {
11270 struct bnxt *bp = netdev_priv(dev);
11271
11272 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
11273 return -EOPNOTSUPP;
11274
11275 	/* The PF and its VF-reps only support the switchdev framework */
11276 if (!BNXT_PF(bp))
11277 return -EOPNOTSUPP;
11278
11279 ppid->id_len = sizeof(bp->switch_id);
11280 memcpy(ppid->id, bp->switch_id, ppid->id_len);
11281
11282 return 0;
11283 }
11284
11285 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
11286 {
11287 struct bnxt *bp = netdev_priv(dev);
11288
11289 return &bp->dl_port;
11290 }
11291
11292 static const struct net_device_ops bnxt_netdev_ops = {
11293 .ndo_open = bnxt_open,
11294 .ndo_start_xmit = bnxt_start_xmit,
11295 .ndo_stop = bnxt_close,
11296 .ndo_get_stats64 = bnxt_get_stats64,
11297 .ndo_set_rx_mode = bnxt_set_rx_mode,
11298 .ndo_do_ioctl = bnxt_ioctl,
11299 .ndo_validate_addr = eth_validate_addr,
11300 .ndo_set_mac_address = bnxt_change_mac_addr,
11301 .ndo_change_mtu = bnxt_change_mtu,
11302 .ndo_fix_features = bnxt_fix_features,
11303 .ndo_set_features = bnxt_set_features,
11304 .ndo_tx_timeout = bnxt_tx_timeout,
11305 #ifdef CONFIG_BNXT_SRIOV
11306 .ndo_get_vf_config = bnxt_get_vf_config,
11307 .ndo_set_vf_mac = bnxt_set_vf_mac,
11308 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
11309 .ndo_set_vf_rate = bnxt_set_vf_bw,
11310 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
11311 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
11312 .ndo_set_vf_trust = bnxt_set_vf_trust,
11313 #endif
11314 .ndo_setup_tc = bnxt_setup_tc,
11315 #ifdef CONFIG_RFS_ACCEL
11316 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
11317 #endif
11318 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
11319 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
11320 .ndo_bpf = bnxt_xdp,
11321 .ndo_xdp_xmit = bnxt_xdp_xmit,
11322 .ndo_bridge_getlink = bnxt_bridge_getlink,
11323 .ndo_bridge_setlink = bnxt_bridge_setlink,
11324 .ndo_get_devlink_port = bnxt_get_devlink_port,
11325 };
11326
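/* PCI remove handler: disable SR-IOV, unregister devlink and the netdev,
 * then free TC offload state, slow-path work, interrupts, HWRM buffers,
 * context memory and finally the netdev itself.
 */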
11327 static void bnxt_remove_one(struct pci_dev *pdev)
11328 {
11329 struct net_device *dev = pci_get_drvdata(pdev);
11330 struct bnxt *bp = netdev_priv(dev);
11331
11332 if (BNXT_PF(bp)) {
11333 bnxt_sriov_disable(bp);
11334 bnxt_dl_unregister(bp);
11335 }
11336
11337 pci_disable_pcie_error_reporting(pdev);
11338 unregister_netdev(dev);
11339 bnxt_shutdown_tc(bp);
11340 bnxt_cancel_sp_work(bp);
11341 bp->sp_event = 0;
11342
11343 bnxt_clear_int_mode(bp);
11344 bnxt_hwrm_func_drv_unrgtr(bp);
11345 bnxt_free_hwrm_resources(bp);
11346 bnxt_free_hwrm_short_cmd_req(bp);
11347 bnxt_ethtool_free(bp);
11348 bnxt_dcb_free(bp);
11349 kfree(bp->edev);
11350 bp->edev = NULL;
11351 bnxt_cleanup_pci(bp);
11352 bnxt_free_ctx_mem(bp);
11353 kfree(bp->ctx);
11354 bp->ctx = NULL;
11355 bnxt_free_port_stats(bp);
11356 free_netdev(dev);
11357 }
11358
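/* Query PHY capabilities and the current link state from firmware.  When
 * fw_dflt is set, also seed the driver's link settings (autoneg, speed,
 * duplex, flow control) from the NVM-configured defaults.
 */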
11359 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
11360 {
11361 int rc = 0;
11362 struct bnxt_link_info *link_info = &bp->link_info;
11363
11364 rc = bnxt_hwrm_phy_qcaps(bp);
11365 if (rc) {
11366 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %d)\n",
11367 rc);
11368 return rc;
11369 }
11370 rc = bnxt_update_link(bp, false);
11371 if (rc) {
11372 netdev_err(bp->dev, "Probe phy can't update link (rc: %d)\n",
11373 rc);
11374 return rc;
11375 }
11376
11377 /* Older firmware does not have supported_auto_speeds, so assume
11378 * that all supported speeds can be autonegotiated.
11379 */
11380 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
11381 link_info->support_auto_speeds = link_info->support_speeds;
11382
11383 if (!fw_dflt)
11384 return 0;
11385
11386 /* Initialize the ethtool settings copy with NVM settings */
11387 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11388 link_info->autoneg = BNXT_AUTONEG_SPEED;
11389 if (bp->hwrm_spec_code >= 0x10201) {
11390 if (link_info->auto_pause_setting &
11391 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11392 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11393 } else {
11394 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11395 }
11396 link_info->advertising = link_info->auto_link_speeds;
11397 } else {
11398 link_info->req_link_speed = link_info->force_link_speed;
11399 link_info->req_duplex = link_info->duplex_setting;
11400 }
11401 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11402 link_info->req_flow_ctrl =
11403 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11404 else
11405 link_info->req_flow_ctrl = link_info->force_pause_setting;
11406 return 0;
11407 }
11408
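/* Return the number of MSI-X vectors the device supports, or 1 if it has
 * no MSI-X capability.
 */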
11409 static int bnxt_get_max_irq(struct pci_dev *pdev)
11410 {
11411 u16 ctrl;
11412
11413 if (!pdev->msix_cap)
11414 return 1;
11415
11416 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
11417 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
11418 }
11419
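/* Compute upper bounds for RX, TX and completion rings from the hardware
 * resources, IRQs and stat contexts left over after the ULP (RoCE) driver's
 * share is deducted.
 */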
11420 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11421 int *max_cp)
11422 {
11423 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11424 int max_ring_grps = 0, max_irq;
11425
11426 *max_tx = hw_resc->max_tx_rings;
11427 *max_rx = hw_resc->max_rx_rings;
11428 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
11429 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
11430 bnxt_get_ulp_msix_num(bp),
11431 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
11432 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11433 *max_cp = min_t(int, *max_cp, max_irq);
11434 max_ring_grps = hw_resc->max_hw_ring_grps;
11435 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
11436 *max_cp -= 1;
11437 *max_rx -= 2;
11438 }
11439 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11440 *max_rx >>= 1;
11441 if (bp->flags & BNXT_FLAG_CHIP_P5) {
11442 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
11443 /* On P5 chips, the max_cp output parameter is the number of available NQs */
11444 *max_cp = max_irq;
11445 }
11446 *max_rx = min_t(int, *max_rx, max_ring_grps);
11447 }
11448
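/* Report the maximum usable RX/TX rings, trimmed so that every ring can be
 * paired with a completion ring.
 */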
11449 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
11450 {
11451 int rx, tx, cp;
11452
11453 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
11454 *max_rx = rx;
11455 *max_tx = tx;
11456 if (!rx || !tx || !cp)
11457 return -ENOMEM;
11458
11459 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
11460 }
11461
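/* Like bnxt_get_max_rings(), but falls back to disabling aggregation rings
 * when resources are short, and leaves headroom for the minimum RoCE
 * resources when the device is RoCE capable.
 */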
11462 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11463 bool shared)
11464 {
11465 int rc;
11466
11467 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
11468 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
11469 /* Not enough rings, try disabling agg rings. */
11470 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
11471 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
11472 if (rc) {
11473 /* set BNXT_FLAG_AGG_RINGS back for consistency */
11474 bp->flags |= BNXT_FLAG_AGG_RINGS;
11475 return rc;
11476 }
11477 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
11478 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11479 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11480 bnxt_set_ring_params(bp);
11481 }
11482
11483 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
11484 int max_cp, max_stat, max_irq;
11485
11486 /* Reserve minimum resources for RoCE */
11487 max_cp = bnxt_get_max_func_cp_rings(bp);
11488 max_stat = bnxt_get_max_func_stat_ctxs(bp);
11489 max_irq = bnxt_get_max_func_irqs(bp);
11490 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
11491 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
11492 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
11493 return 0;
11494
11495 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
11496 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
11497 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
11498 max_cp = min_t(int, max_cp, max_irq);
11499 max_cp = min_t(int, max_cp, max_stat);
11500 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
11501 if (rc)
11502 rc = 0;
11503 }
11504 return rc;
11505 }
11506
11507 /* In the initial default shared ring setting, each shared ring must have
11508 * an RX/TX ring pair.
11509 */
11510 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
11511 {
11512 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
11513 bp->rx_nr_rings = bp->cp_nr_rings;
11514 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
11515 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11516 }
11517
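/* Choose and reserve the default ring counts: start from the default RSS
 * queue count (1 in a kdump kernel), scale down on multi-port cards so the
 * total does not exceed the CPU count, then cap by what the hardware can
 * actually provide.
 */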
11518 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
11519 {
11520 int dflt_rings, max_rx_rings, max_tx_rings, rc;
11521
11522 if (!bnxt_can_reserve_rings(bp))
11523 return 0;
11524
11525 if (sh)
11526 bp->flags |= BNXT_FLAG_SHARED_RINGS;
11527 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
11528 /* Reduce default rings on multi-port cards so that total default
11529 * rings do not exceed CPU count.
11530 */
11531 if (bp->port_count > 1) {
11532 int max_rings =
11533 max_t(int, num_online_cpus() / bp->port_count, 1);
11534
11535 dflt_rings = min_t(int, dflt_rings, max_rings);
11536 }
11537 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
11538 if (rc)
11539 return rc;
11540 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
11541 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
11542 if (sh)
11543 bnxt_trim_dflt_sh_rings(bp);
11544 else
11545 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
11546 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11547
11548 rc = __bnxt_reserve_rings(bp);
11549 if (rc)
11550 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
11551 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11552 if (sh)
11553 bnxt_trim_dflt_sh_rings(bp);
11554
11555 /* Rings may have been trimmed, re-reserve the trimmed rings. */
11556 if (bnxt_need_reserve_rings(bp)) {
11557 rc = __bnxt_reserve_rings(bp);
11558 if (rc)
11559 netdev_warn(bp->dev, "Second ring reservation failed.\n");
11560 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11561 }
11562 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11563 bp->rx_nr_rings++;
11564 bp->cp_nr_rings++;
11565 }
11566 return rc;
11567 }
11568
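/* Set up default shared rings and interrupt mode if no rings have been
 * configured yet, and enable RFS/NTUPLE when supported.
 */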
11569 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
11570 {
11571 int rc;
11572
11573 if (bp->tx_nr_rings)
11574 return 0;
11575
11576 bnxt_ulp_irq_stop(bp);
11577 bnxt_clear_int_mode(bp);
11578 rc = bnxt_set_dflt_rings(bp, true);
11579 if (rc) {
11580 netdev_err(bp->dev, "Not enough rings available.\n");
11581 goto init_dflt_ring_err;
11582 }
11583 rc = bnxt_init_int_mode(bp);
11584 if (rc)
11585 goto init_dflt_ring_err;
11586
11587 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11588 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
11589 bp->flags |= BNXT_FLAG_RFS;
11590 bp->dev->features |= NETIF_F_NTUPLE;
11591 }
11592 init_dflt_ring_err:
11593 bnxt_ulp_irq_restart(bp, rc);
11594 return rc;
11595 }
11596
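/* Re-query the function's resource capabilities from firmware and
 * reinitialize the interrupt mode, closing and reopening the NIC around
 * the change if it is running.
 */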
11597 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
11598 {
11599 int rc;
11600
11601 ASSERT_RTNL();
11602 bnxt_hwrm_func_qcaps(bp);
11603
11604 if (netif_running(bp->dev))
11605 __bnxt_close_nic(bp, true, false);
11606
11607 bnxt_ulp_irq_stop(bp);
11608 bnxt_clear_int_mode(bp);
11609 rc = bnxt_init_int_mode(bp);
11610 bnxt_ulp_irq_restart(bp, rc);
11611
11612 if (netif_running(bp->dev)) {
11613 if (rc)
11614 dev_close(bp->dev);
11615 else
11616 rc = bnxt_open_nic(bp, true, false);
11617 }
11618
11619 return rc;
11620 }
11621
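/* Set the initial netdev MAC address: PFs use the firmware-provided
 * address; VFs use the admin-assigned MAC if one is valid, otherwise a
 * random address, and then ask for approval of the choice.
 */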
11622 static int bnxt_init_mac_addr(struct bnxt *bp)
11623 {
11624 int rc = 0;
11625
11626 if (BNXT_PF(bp)) {
11627 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
11628 } else {
11629 #ifdef CONFIG_BNXT_SRIOV
11630 struct bnxt_vf_info *vf = &bp->vf;
11631 bool strict_approval = true;
11632
11633 if (is_valid_ether_addr(vf->mac_addr)) {
11634 /* overwrite netdev dev_addr with admin VF MAC */
11635 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
11636 /* Older PF driver or firmware may not approve this
11637 * correctly.
11638 */
11639 strict_approval = false;
11640 } else {
11641 eth_hw_addr_random(bp->dev);
11642 }
11643 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
11644 #endif
11645 }
11646 return rc;
11647 }
11648
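/* Read the 64-bit PCIe Device Serial Number from the DSN extended
 * capability into dsn[].
 */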
11649 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
11650 {
11651 struct pci_dev *pdev = bp->pdev;
11652 int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
11653 u32 dw;
11654
11655 if (!pos) {
11656 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
11657 return -EOPNOTSUPP;
11658 }
11659
11660 /* DSN (two dw) is at an offset of 4 from the cap pos */
11661 pos += 4;
11662 pci_read_config_dword(pdev, pos, &dw);
11663 put_unaligned_le32(dw, &dsn[0]);
11664 pci_read_config_dword(pdev, pos + 4, &dw);
11665 put_unaligned_le32(dw, &dsn[4]);
11666 return 0;
11667 }
11668
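/* PCI probe handler: allocate and initialize the netdev, bring up the
 * firmware interface, set up offload features, default rings and interrupt
 * mode, then register the netdev (and devlink on the PF).
 */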
11669 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
11670 {
11671 static int version_printed;
11672 struct net_device *dev;
11673 struct bnxt *bp;
11674 int rc, max_irqs;
11675
11676 if (pci_is_bridge(pdev))
11677 return -ENODEV;
11678
11679 if (version_printed++ == 0)
11680 pr_info("%s", version);
11681
11682 max_irqs = bnxt_get_max_irq(pdev);
11683 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
11684 if (!dev)
11685 return -ENOMEM;
11686
11687 bp = netdev_priv(dev);
11688 bnxt_set_max_func_irqs(bp, max_irqs);
11689
11690 if (bnxt_vf_pciid(ent->driver_data))
11691 bp->flags |= BNXT_FLAG_VF;
11692
11693 if (pdev->msix_cap)
11694 bp->flags |= BNXT_FLAG_MSIX_CAP;
11695
11696 rc = bnxt_init_board(pdev, dev);
11697 if (rc < 0)
11698 goto init_err_free;
11699
11700 dev->netdev_ops = &bnxt_netdev_ops;
11701 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
11702 dev->ethtool_ops = &bnxt_ethtool_ops;
11703 pci_set_drvdata(pdev, dev);
11704
11705 rc = bnxt_alloc_hwrm_resources(bp);
11706 if (rc)
11707 goto init_err_pci_clean;
11708
11709 mutex_init(&bp->hwrm_cmd_lock);
11710 mutex_init(&bp->link_lock);
11711
11712 rc = bnxt_fw_init_one_p1(bp);
11713 if (rc)
11714 goto init_err_pci_clean;
11715
11716 if (BNXT_CHIP_P5(bp))
11717 bp->flags |= BNXT_FLAG_CHIP_P5;
11718
11719 rc = bnxt_fw_init_one_p2(bp);
11720 if (rc)
11721 goto init_err_pci_clean;
11722
11723 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
11724 NETIF_F_TSO | NETIF_F_TSO6 |
11725 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
11726 NETIF_F_GSO_IPXIP4 |
11727 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
11728 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
11729 NETIF_F_RXCSUM | NETIF_F_GRO;
11730
11731 if (BNXT_SUPPORTS_TPA(bp))
11732 dev->hw_features |= NETIF_F_LRO;
11733
11734 dev->hw_enc_features =
11735 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
11736 NETIF_F_TSO | NETIF_F_TSO6 |
11737 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
11738 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
11739 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
11740 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
11741 NETIF_F_GSO_GRE_CSUM;
11742 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
11743 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
11744 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
11745 if (BNXT_SUPPORTS_TPA(bp))
11746 dev->hw_features |= NETIF_F_GRO_HW;
11747 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
11748 if (dev->features & NETIF_F_GRO_HW)
11749 dev->features &= ~NETIF_F_LRO;
11750 dev->priv_flags |= IFF_UNICAST_FLT;
11751
11752 #ifdef CONFIG_BNXT_SRIOV
11753 init_waitqueue_head(&bp->sriov_cfg_wait);
11754 mutex_init(&bp->sriov_lock);
11755 #endif
11756 if (BNXT_SUPPORTS_TPA(bp)) {
11757 bp->gro_func = bnxt_gro_func_5730x;
11758 if (BNXT_CHIP_P4(bp))
11759 bp->gro_func = bnxt_gro_func_5731x;
11760 else if (BNXT_CHIP_P5(bp))
11761 bp->gro_func = bnxt_gro_func_5750x;
11762 }
11763 if (!BNXT_CHIP_P4_PLUS(bp))
11764 bp->flags |= BNXT_FLAG_DOUBLE_DB;
11765
11766 bp->ulp_probe = bnxt_ulp_probe;
11767
11768 rc = bnxt_init_mac_addr(bp);
11769 if (rc) {
11770 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
11771 rc = -EADDRNOTAVAIL;
11772 goto init_err_pci_clean;
11773 }
11774
11775 if (BNXT_PF(bp)) {
11776 /* Read the adapter's DSN to use as the eswitch switch_id */
11777 rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
11778 if (rc)
11779 goto init_err_pci_clean;
11780 }
11781
11782 /* MTU range: 60 - FW defined max */
11783 dev->min_mtu = ETH_ZLEN;
11784 dev->max_mtu = bp->max_mtu;
11785
11786 rc = bnxt_probe_phy(bp, true);
11787 if (rc)
11788 goto init_err_pci_clean;
11789
11790 bnxt_set_rx_skb_mode(bp, false);
11791 bnxt_set_tpa_flags(bp);
11792 bnxt_set_ring_params(bp);
11793 rc = bnxt_set_dflt_rings(bp, true);
11794 if (rc) {
11795 netdev_err(bp->dev, "Not enough rings available.\n");
11796 rc = -ENOMEM;
11797 goto init_err_pci_clean;
11798 }
11799
11800 bnxt_fw_init_one_p3(bp);
11801
11802 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
11803 bp->flags |= BNXT_FLAG_STRIP_VLAN;
11804
11805 rc = bnxt_init_int_mode(bp);
11806 if (rc)
11807 goto init_err_pci_clean;
11808
11809 /* No TC has been set yet and rings may have been trimmed due to
11810 * limited MSIX, so we re-initialize the TX rings per TC.
11811 */
11812 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11813
11814 if (BNXT_PF(bp)) {
11815 if (!bnxt_pf_wq) {
11816 bnxt_pf_wq =
11817 create_singlethread_workqueue("bnxt_pf_wq");
11818 if (!bnxt_pf_wq) {
11819 dev_err(&pdev->dev, "Unable to create workqueue.\n");
11820 goto init_err_pci_clean;
11821 }
11822 }
11823 bnxt_init_tc(bp);
11824 }
11825
11826 rc = register_netdev(dev);
11827 if (rc)
11828 goto init_err_cleanup_tc;
11829
11830 if (BNXT_PF(bp))
11831 bnxt_dl_register(bp);
11832
11833 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
11834 board_info[ent->driver_data].name,
11835 (long)pci_resource_start(pdev, 0), dev->dev_addr);
11836 pcie_print_link_status(pdev);
11837
11838 return 0;
11839
11840 init_err_cleanup_tc:
11841 bnxt_shutdown_tc(bp);
11842 bnxt_clear_int_mode(bp);
11843
11844 init_err_pci_clean:
11845 bnxt_free_hwrm_short_cmd_req(bp);
11846 bnxt_free_hwrm_resources(bp);
11847 bnxt_free_ctx_mem(bp);
11848 kfree(bp->ctx);
11849 bp->ctx = NULL;
11850 kfree(bp->fw_health);
11851 bp->fw_health = NULL;
11852 bnxt_cleanup_pci(bp);
11853
11854 init_err_free:
11855 free_netdev(dev);
11856 return rc;
11857 }
11858
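/* PCI shutdown handler: close the netdev and stop the ULP driver; on
 * system power-off also arm wake-on-LAN as configured and put the device
 * into D3hot.
 */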
11859 static void bnxt_shutdown(struct pci_dev *pdev)
11860 {
11861 struct net_device *dev = pci_get_drvdata(pdev);
11862 struct bnxt *bp;
11863
11864 if (!dev)
11865 return;
11866
11867 rtnl_lock();
11868 bp = netdev_priv(dev);
11869 if (!bp)
11870 goto shutdown_exit;
11871
11872 if (netif_running(dev))
11873 dev_close(dev);
11874
11875 bnxt_ulp_shutdown(bp);
11876
11877 if (system_state == SYSTEM_POWER_OFF) {
11878 bnxt_clear_int_mode(bp);
11879 pci_disable_device(pdev);
11880 pci_wake_from_d3(pdev, bp->wol);
11881 pci_set_power_state(pdev, PCI_D3hot);
11882 }
11883
11884 shutdown_exit:
11885 rtnl_unlock();
11886 }
11887
11888 #ifdef CONFIG_PM_SLEEP
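/* System suspend: detach and close the netdev, then unregister the driver
 * from firmware.
 */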
11889 static int bnxt_suspend(struct device *device)
11890 {
11891 struct net_device *dev = dev_get_drvdata(device);
11892 struct bnxt *bp = netdev_priv(dev);
11893 int rc = 0;
11894
11895 rtnl_lock();
11896 if (netif_running(dev)) {
11897 netif_device_detach(dev);
11898 rc = bnxt_close(dev);
11899 }
11900 bnxt_hwrm_func_drv_unrgtr(bp);
11901 rtnl_unlock();
11902 return rc;
11903 }
11904
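/* System resume: re-establish communication with firmware, reset the
 * function, re-read the WoL settings and reopen the netdev if it was
 * running before suspend.
 */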
11905 static int bnxt_resume(struct device *device)
11906 {
11907 struct net_device *dev = dev_get_drvdata(device);
11908 struct bnxt *bp = netdev_priv(dev);
11909 int rc = 0;
11910
11911 rtnl_lock();
11912 if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
11913 rc = -ENODEV;
11914 goto resume_exit;
11915 }
11916 rc = bnxt_hwrm_func_reset(bp);
11917 if (rc) {
11918 rc = -EBUSY;
11919 goto resume_exit;
11920 }
11921 bnxt_get_wol_settings(bp);
11922 if (netif_running(dev)) {
11923 rc = bnxt_open(dev);
11924 if (!rc)
11925 netif_device_attach(dev);
11926 }
11927
11928 resume_exit:
11929 rtnl_unlock();
11930 return rc;
11931 }
11932
11933 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
11934 #define BNXT_PM_OPS (&bnxt_pm_ops)
11935
11936 #else
11937
11938 #define BNXT_PM_OPS NULL
11939
11940 #endif /* CONFIG_PM_SLEEP */
11941
11942 /**
11943 * bnxt_io_error_detected - called when PCI error is detected
11944 * @pdev: Pointer to PCI device
11945 * @state: The current pci connection state
11946 *
11947 * This function is called after a PCI bus error affecting
11948 * this device has been detected.
11949 */
11950 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
11951 pci_channel_state_t state)
11952 {
11953 struct net_device *netdev = pci_get_drvdata(pdev);
11954 struct bnxt *bp = netdev_priv(netdev);
11955
11956 netdev_info(netdev, "PCI I/O error detected\n");
11957
11958 rtnl_lock();
11959 netif_device_detach(netdev);
11960
11961 bnxt_ulp_stop(bp);
11962
11963 if (state == pci_channel_io_perm_failure) {
11964 rtnl_unlock();
11965 return PCI_ERS_RESULT_DISCONNECT;
11966 }
11967
11968 if (netif_running(netdev))
11969 bnxt_close(netdev);
11970
11971 pci_disable_device(pdev);
11972 rtnl_unlock();
11973
11974 /* Request a slot reset. */
11975 return PCI_ERS_RESULT_NEED_RESET;
11976 }
11977
11978 /**
11979 * bnxt_io_slot_reset - called after the pci bus has been reset.
11980 * @pdev: Pointer to PCI device
11981 *
11982 * Restart the card from scratch, as if from a cold-boot.
11983 * At this point, the card has experienced a hard reset,
11984 * followed by fixups by BIOS, and has its config space
11985 * set up identically to what it was at cold boot.
11986 */
11987 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
11988 {
11989 struct net_device *netdev = pci_get_drvdata(pdev);
11990 struct bnxt *bp = netdev_priv(netdev);
11991 int err = 0;
11992 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
11993
11994 netdev_info(bp->dev, "PCI Slot Reset\n");
11995
11996 rtnl_lock();
11997
11998 if (pci_enable_device(pdev)) {
11999 dev_err(&pdev->dev,
12000 "Cannot re-enable PCI device after reset.\n");
12001 } else {
12002 pci_set_master(pdev);
12003
12004 err = bnxt_hwrm_func_reset(bp);
12005 if (!err && netif_running(netdev))
12006 err = bnxt_open(netdev);
12007
12008 if (!err) {
12009 result = PCI_ERS_RESULT_RECOVERED;
12010 bnxt_ulp_start(bp);
12011 }
12012 }
12013
12014 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
12015 dev_close(netdev);
12016
12017 rtnl_unlock();
12018
12019 return result;
12020 }
12021
12022 /**
12023 * bnxt_io_resume - called when traffic can start flowing again.
12024 * @pdev: Pointer to PCI device
12025 *
12026 * This callback is called when the error recovery driver tells
12027 * us that it's OK to resume normal operation.
12028 */
12029 static void bnxt_io_resume(struct pci_dev *pdev)
12030 {
12031 struct net_device *netdev = pci_get_drvdata(pdev);
12032
12033 rtnl_lock();
12034
12035 netif_device_attach(netdev);
12036
12037 rtnl_unlock();
12038 }
12039
12040 static const struct pci_error_handlers bnxt_err_handler = {
12041 .error_detected = bnxt_io_error_detected,
12042 .slot_reset = bnxt_io_slot_reset,
12043 .resume = bnxt_io_resume
12044 };
12045
12046 static struct pci_driver bnxt_pci_driver = {
12047 .name = DRV_MODULE_NAME,
12048 .id_table = bnxt_pci_tbl,
12049 .probe = bnxt_init_one,
12050 .remove = bnxt_remove_one,
12051 .shutdown = bnxt_shutdown,
12052 .driver.pm = BNXT_PM_OPS,
12053 .err_handler = &bnxt_err_handler,
12054 #if defined(CONFIG_BNXT_SRIOV)
12055 .sriov_configure = bnxt_sriov_configure,
12056 #endif
12057 };
12058
12059 static int __init bnxt_init(void)
12060 {
12061 bnxt_debug_init();
12062 return pci_register_driver(&bnxt_pci_driver);
12063 }
12064
12065 static void __exit bnxt_exit(void)
12066 {
12067 pci_unregister_driver(&bnxt_pci_driver);
12068 if (bnxt_pf_wq)
12069 destroy_workqueue(bnxt_pf_wq);
12070 bnxt_debug_exit();
12071 }
12072
12073 module_init(bnxt_init);
12074 module_exit(bnxt_exit);