drivers/net/ethernet/broadcom/bnxt/bnxt.c
1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/udp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <net/udp_tunnel.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/ptp_clock_kernel.h>
53 #include <linux/timecounter.h>
54 #include <linux/cpu_rmap.h>
55 #include <linux/cpumask.h>
56 #include <net/pkt_cls.h>
57 #include <linux/hwmon.h>
58 #include <linux/hwmon-sysfs.h>
59 #include <net/page_pool.h>
60
61 #include "bnxt_hsi.h"
62 #include "bnxt.h"
63 #include "bnxt_ulp.h"
64 #include "bnxt_sriov.h"
65 #include "bnxt_ethtool.h"
66 #include "bnxt_dcb.h"
67 #include "bnxt_xdp.h"
68 #include "bnxt_ptp.h"
69 #include "bnxt_vfr.h"
70 #include "bnxt_tc.h"
71 #include "bnxt_devlink.h"
72 #include "bnxt_debugfs.h"
73
74 #define BNXT_TX_TIMEOUT (5 * HZ)
75 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
76 NETIF_MSG_TX_ERR)
77
78 MODULE_LICENSE("GPL");
79 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
80
81 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
82 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
83 #define BNXT_RX_COPY_THRESH 256
84
85 #define BNXT_TX_PUSH_THRESH 164
86
87 enum board_idx {
88 BCM57301,
89 BCM57302,
90 BCM57304,
91 BCM57417_NPAR,
92 BCM58700,
93 BCM57311,
94 BCM57312,
95 BCM57402,
96 BCM57404,
97 BCM57406,
98 BCM57402_NPAR,
99 BCM57407,
100 BCM57412,
101 BCM57414,
102 BCM57416,
103 BCM57417,
104 BCM57412_NPAR,
105 BCM57314,
106 BCM57417_SFP,
107 BCM57416_SFP,
108 BCM57404_NPAR,
109 BCM57406_NPAR,
110 BCM57407_SFP,
111 BCM57407_NPAR,
112 BCM57414_NPAR,
113 BCM57416_NPAR,
114 BCM57452,
115 BCM57454,
116 BCM5745x_NPAR,
117 BCM57508,
118 BCM57504,
119 BCM57502,
120 BCM57508_NPAR,
121 BCM57504_NPAR,
122 BCM57502_NPAR,
123 BCM58802,
124 BCM58804,
125 BCM58808,
126 NETXTREME_E_VF,
127 NETXTREME_C_VF,
128 NETXTREME_S_VF,
129 NETXTREME_C_VF_HV,
130 NETXTREME_E_VF_HV,
131 NETXTREME_E_P5_VF,
132 NETXTREME_E_P5_VF_HV,
133 };
134
135 /* indexed by enum above */
136 static const struct {
137 char *name;
138 } board_info[] = {
139 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
140 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
141 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
142 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
143 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
144 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
145 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
146 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
147 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
148 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
149 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
150 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
151 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
152 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
153 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
154 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
155 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
156 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
157 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
158 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
159 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
160 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
161 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
162 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
163 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
164 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
165 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
166 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
167 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
168 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
169 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
170 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
171 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
172 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
173 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
174 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
175 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
176 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
177 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
178 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
179 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
180 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
181 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
182 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
183 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
184 };
185
186 static const struct pci_device_id bnxt_pci_tbl[] = {
187 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
188 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
189 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
190 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
191 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
192 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
193 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
194 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
195 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
196 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
197 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
198 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
199 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
200 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
201 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
202 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
203 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
204 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
205 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
206 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
207 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
208 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
209 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
210 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
211 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
212 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
213 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
214 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
215 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
216 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
217 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
218 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
219 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
220 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
221 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
222 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
223 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
224 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
225 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
226 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
227 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
228 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
229 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
230 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
231 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
232 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
233 #ifdef CONFIG_BNXT_SRIOV
234 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
235 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
236 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
237 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
238 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
239 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
240 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
241 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
242 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
243 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
244 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
245 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
246 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
247 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
248 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
249 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
250 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
251 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
252 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
253 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
254 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
255 #endif
256 { 0 }
257 };
258
259 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
260
261 static const u16 bnxt_vf_req_snif[] = {
262 HWRM_FUNC_CFG,
263 HWRM_FUNC_VF_CFG,
264 HWRM_PORT_PHY_QCFG,
265 HWRM_CFA_L2_FILTER_ALLOC,
266 };
267
268 static const u16 bnxt_async_events_arr[] = {
269 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
270 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
271 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
272 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
273 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
274 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
275 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
276 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
277 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
278 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
279 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
280 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
281 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
282 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
283 };
284
285 static struct workqueue_struct *bnxt_pf_wq;
286
287 static bool bnxt_vf_pciid(enum board_idx idx)
288 {
289 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
290 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
291 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
292 idx == NETXTREME_E_P5_VF_HV);
293 }
294
295 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
296 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
297 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
298
299 #define BNXT_CP_DB_IRQ_DIS(db) \
300 writel(DB_CP_IRQ_DIS_FLAGS, db)
301
302 #define BNXT_DB_CQ(db, idx) \
303 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
304
305 #define BNXT_DB_NQ_P5(db, idx) \
306 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
307
308 #define BNXT_DB_CQ_ARM(db, idx) \
309 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
310
311 #define BNXT_DB_NQ_ARM_P5(db, idx) \
312 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
313
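/* Doorbell helpers: on P5 (57500-series) chips the NQ/CQ doorbells are
 * 64-bit writes built from db_key64 plus a DBR_TYPE_* qualifier, while
 * older chips use the 32-bit DB_KEY_CP-style format defined above. The
 * *_arm variants also re-arm the completion ring interrupt.
 */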
314 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
315 {
316 if (bp->flags & BNXT_FLAG_CHIP_P5)
317 BNXT_DB_NQ_P5(db, idx);
318 else
319 BNXT_DB_CQ(db, idx);
320 }
321
322 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
323 {
324 if (bp->flags & BNXT_FLAG_CHIP_P5)
325 BNXT_DB_NQ_ARM_P5(db, idx);
326 else
327 BNXT_DB_CQ_ARM(db, idx);
328 }
329
330 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
331 {
332 if (bp->flags & BNXT_FLAG_CHIP_P5)
333 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
334 db->doorbell);
335 else
336 BNXT_DB_CQ(db, idx);
337 }
338
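/* TX length-hint table, indexed by packet length in 512-byte units
 * (i.e. bnxt_lhint_arr[len >> 9] in bnxt_start_xmit()); it gives the
 * hardware a coarse length hint for the packet being sent.
 */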
339 const u16 bnxt_lhint_arr[] = {
340 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
341 TX_BD_FLAGS_LHINT_512_TO_1023,
342 TX_BD_FLAGS_LHINT_1024_TO_2047,
343 TX_BD_FLAGS_LHINT_1024_TO_2047,
344 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
345 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
346 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
347 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
348 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
349 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
350 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
351 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
352 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
353 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
354 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
355 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
356 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
357 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
358 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
359 };
360
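/* Return the CFA action (switchdev port id) attached to the skb via a
 * METADATA_HW_PORT_MUX metadata dst, typically set by the VF
 * representor TX path, or 0 if the packet carries no such metadata.
 */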
361 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
362 {
363 struct metadata_dst *md_dst = skb_metadata_dst(skb);
364
365 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
366 return 0;
367
368 return md_dst->u.port_info.port_id;
369 }
370
371 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
372 u16 prod)
373 {
374 bnxt_db_write(bp, &txr->tx_db, prod);
375 txr->kick_pending = 0;
376 }
377
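/* Stop the TX queue, then re-check ring space to close the race with
 * bnxt_tx_int() freeing descriptors concurrently. Returns true if the
 * queue remains stopped, false if it was woken back up.
 */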
378 static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
379 struct bnxt_tx_ring_info *txr,
380 struct netdev_queue *txq)
381 {
382 netif_tx_stop_queue(txq);
383
384 /* netif_tx_stop_queue() must be done before checking
385 * tx index in bnxt_tx_avail() below, because in
386 * bnxt_tx_int(), we update tx index before checking for
387 * netif_tx_queue_stopped().
388 */
389 smp_mb();
390 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
391 netif_tx_wake_queue(txq);
392 return false;
393 }
394
395 return true;
396 }
397
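/* Main transmit entry point. Packets no longer than bp->tx_push_thresh
 * (and with no lflags) are written straight into the doorbell BAR as a
 * "push" BD when the ring is empty; everything else is DMA-mapped and
 * posted as a long TX BD chain, with the doorbell kicked here or
 * deferred via txr->kick_pending when xmit_more is set.
 */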
398 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
399 {
400 struct bnxt *bp = netdev_priv(dev);
401 struct tx_bd *txbd;
402 struct tx_bd_ext *txbd1;
403 struct netdev_queue *txq;
404 int i;
405 dma_addr_t mapping;
406 unsigned int length, pad = 0;
407 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
408 u16 prod, last_frag;
409 struct pci_dev *pdev = bp->pdev;
410 struct bnxt_tx_ring_info *txr;
411 struct bnxt_sw_tx_bd *tx_buf;
412 __le32 lflags = 0;
413
414 i = skb_get_queue_mapping(skb);
415 if (unlikely(i >= bp->tx_nr_rings)) {
416 dev_kfree_skb_any(skb);
417 atomic_long_inc(&dev->tx_dropped);
418 return NETDEV_TX_OK;
419 }
420
421 txq = netdev_get_tx_queue(dev, i);
422 txr = &bp->tx_ring[bp->tx_ring_map[i]];
423 prod = txr->tx_prod;
424
425 free_size = bnxt_tx_avail(bp, txr);
426 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
427 /* We must have raced with NAPI cleanup */
428 if (net_ratelimit() && txr->kick_pending)
429 netif_warn(bp, tx_err, dev,
430 "bnxt: ring busy w/ flush pending!\n");
431 if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
432 return NETDEV_TX_BUSY;
433 }
434
435 length = skb->len;
436 len = skb_headlen(skb);
437 last_frag = skb_shinfo(skb)->nr_frags;
438
439 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
440
441 txbd->tx_bd_opaque = prod;
442
443 tx_buf = &txr->tx_buf_ring[prod];
444 tx_buf->skb = skb;
445 tx_buf->nr_frags = last_frag;
446
447 vlan_tag_flags = 0;
448 cfa_action = bnxt_xmit_get_cfa_action(skb);
449 if (skb_vlan_tag_present(skb)) {
450 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
451 skb_vlan_tag_get(skb);
452 /* Currently supports 8021Q, 8021AD vlan offloads
453 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
454 */
455 if (skb->vlan_proto == htons(ETH_P_8021Q))
456 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
457 }
458
459 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
460 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
461
462 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
463 atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
464 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
465 &ptp->tx_hdr_off)) {
466 if (vlan_tag_flags)
467 ptp->tx_hdr_off += VLAN_HLEN;
468 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
469 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
470 } else {
471 atomic_inc(&bp->ptp_cfg->tx_avail);
472 }
473 }
474 }
475
476 if (unlikely(skb->no_fcs))
477 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
478
479 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
480 !lflags) {
481 struct tx_push_buffer *tx_push_buf = txr->tx_push;
482 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
483 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
484 void __iomem *db = txr->tx_db.doorbell;
485 void *pdata = tx_push_buf->data;
486 u64 *end;
487 int j, push_len;
488
489 /* Set COAL_NOW to be ready quickly for the next push */
490 tx_push->tx_bd_len_flags_type =
491 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
492 TX_BD_TYPE_LONG_TX_BD |
493 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
494 TX_BD_FLAGS_COAL_NOW |
495 TX_BD_FLAGS_PACKET_END |
496 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
497
498 if (skb->ip_summed == CHECKSUM_PARTIAL)
499 tx_push1->tx_bd_hsize_lflags =
500 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
501 else
502 tx_push1->tx_bd_hsize_lflags = 0;
503
504 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
505 tx_push1->tx_bd_cfa_action =
506 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
507
508 end = pdata + length;
509 end = PTR_ALIGN(end, 8) - 1;
510 *end = 0;
511
512 skb_copy_from_linear_data(skb, pdata, len);
513 pdata += len;
514 for (j = 0; j < last_frag; j++) {
515 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
516 void *fptr;
517
518 fptr = skb_frag_address_safe(frag);
519 if (!fptr)
520 goto normal_tx;
521
522 memcpy(pdata, fptr, skb_frag_size(frag));
523 pdata += skb_frag_size(frag);
524 }
525
526 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
527 txbd->tx_bd_haddr = txr->data_mapping;
528 prod = NEXT_TX(prod);
529 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
530 memcpy(txbd, tx_push1, sizeof(*txbd));
531 prod = NEXT_TX(prod);
532 tx_push->doorbell =
533 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
534 txr->tx_prod = prod;
535
536 tx_buf->is_push = 1;
537 netdev_tx_sent_queue(txq, skb->len);
538 wmb(); /* Sync is_push and byte queue before pushing data */
539
540 push_len = (length + sizeof(*tx_push) + 7) / 8;
541 if (push_len > 16) {
542 __iowrite64_copy(db, tx_push_buf, 16);
543 __iowrite32_copy(db + 4, tx_push_buf + 1,
544 (push_len - 16) << 1);
545 } else {
546 __iowrite64_copy(db, tx_push_buf, push_len);
547 }
548
549 goto tx_done;
550 }
551
552 normal_tx:
553 if (length < BNXT_MIN_PKT_SIZE) {
554 pad = BNXT_MIN_PKT_SIZE - length;
555 if (skb_pad(skb, pad))
556 /* SKB already freed. */
557 goto tx_kick_pending;
558 length = BNXT_MIN_PKT_SIZE;
559 }
560
561 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
562
563 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
564 goto tx_free;
565
566 dma_unmap_addr_set(tx_buf, mapping, mapping);
567 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
568 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
569
570 txbd->tx_bd_haddr = cpu_to_le64(mapping);
571
572 prod = NEXT_TX(prod);
573 txbd1 = (struct tx_bd_ext *)
574 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
575
576 txbd1->tx_bd_hsize_lflags = lflags;
577 if (skb_is_gso(skb)) {
578 u32 hdr_len;
579
580 if (skb->encapsulation)
581 hdr_len = skb_inner_network_offset(skb) +
582 skb_inner_network_header_len(skb) +
583 inner_tcp_hdrlen(skb);
584 else
585 hdr_len = skb_transport_offset(skb) +
586 tcp_hdrlen(skb);
587
588 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
589 TX_BD_FLAGS_T_IPID |
590 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
591 length = skb_shinfo(skb)->gso_size;
592 txbd1->tx_bd_mss = cpu_to_le32(length);
593 length += hdr_len;
594 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
595 txbd1->tx_bd_hsize_lflags |=
596 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
597 txbd1->tx_bd_mss = 0;
598 }
599
600 length >>= 9;
601 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
602 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
603 skb->len);
604 i = 0;
605 goto tx_dma_error;
606 }
607 flags |= bnxt_lhint_arr[length];
608 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
609
610 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
611 txbd1->tx_bd_cfa_action =
612 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
613 for (i = 0; i < last_frag; i++) {
614 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
615
616 prod = NEXT_TX(prod);
617 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
618
619 len = skb_frag_size(frag);
620 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
621 DMA_TO_DEVICE);
622
623 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
624 goto tx_dma_error;
625
626 tx_buf = &txr->tx_buf_ring[prod];
627 dma_unmap_addr_set(tx_buf, mapping, mapping);
628
629 txbd->tx_bd_haddr = cpu_to_le64(mapping);
630
631 flags = len << TX_BD_LEN_SHIFT;
632 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
633 }
634
635 flags &= ~TX_BD_LEN;
636 txbd->tx_bd_len_flags_type =
637 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
638 TX_BD_FLAGS_PACKET_END);
639
640 netdev_tx_sent_queue(txq, skb->len);
641
642 skb_tx_timestamp(skb);
643
644 /* Sync BD data before updating doorbell */
645 wmb();
646
647 prod = NEXT_TX(prod);
648 txr->tx_prod = prod;
649
650 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
651 bnxt_txr_db_kick(bp, txr, prod);
652 else
653 txr->kick_pending = 1;
654
655 tx_done:
656
657 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
658 if (netdev_xmit_more() && !tx_buf->is_push)
659 bnxt_txr_db_kick(bp, txr, prod);
660
661 bnxt_txr_netif_try_stop_queue(bp, txr, txq);
662 }
663 return NETDEV_TX_OK;
664
665 tx_dma_error:
666 if (BNXT_TX_PTP_IS_SET(lflags))
667 atomic_inc(&bp->ptp_cfg->tx_avail);
668
669 last_frag = i;
670
671 /* start back at beginning and unmap skb */
672 prod = txr->tx_prod;
673 tx_buf = &txr->tx_buf_ring[prod];
674 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
675 skb_headlen(skb), DMA_TO_DEVICE);
676 prod = NEXT_TX(prod);
677
678 /* unmap remaining mapped pages */
679 for (i = 0; i < last_frag; i++) {
680 prod = NEXT_TX(prod);
681 tx_buf = &txr->tx_buf_ring[prod];
682 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
683 skb_frag_size(&skb_shinfo(skb)->frags[i]),
684 DMA_TO_DEVICE);
685 }
686
687 tx_free:
688 dev_kfree_skb_any(skb);
689 tx_kick_pending:
690 if (txr->kick_pending)
691 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
692 txr->tx_buf_ring[txr->tx_prod].skb = NULL;
693 atomic_long_inc(&dev->tx_dropped);
694 return NETDEV_TX_OK;
695 }
696
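/* Reclaim nr_pkts completed TX packets: unmap their DMA buffers, defer
 * freeing of skbs still waiting for a PTP TX timestamp on P5 chips,
 * update BQL accounting, and wake the queue once enough descriptors
 * are free again.
 */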
697 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
698 {
699 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
700 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
701 u16 cons = txr->tx_cons;
702 struct pci_dev *pdev = bp->pdev;
703 int i;
704 unsigned int tx_bytes = 0;
705
706 for (i = 0; i < nr_pkts; i++) {
707 struct bnxt_sw_tx_bd *tx_buf;
708 bool compl_deferred = false;
709 struct sk_buff *skb;
710 int j, last;
711
712 tx_buf = &txr->tx_buf_ring[cons];
713 cons = NEXT_TX(cons);
714 skb = tx_buf->skb;
715 tx_buf->skb = NULL;
716
717 if (tx_buf->is_push) {
718 tx_buf->is_push = 0;
719 goto next_tx_int;
720 }
721
722 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
723 skb_headlen(skb), DMA_TO_DEVICE);
724 last = tx_buf->nr_frags;
725
726 for (j = 0; j < last; j++) {
727 cons = NEXT_TX(cons);
728 tx_buf = &txr->tx_buf_ring[cons];
729 dma_unmap_page(
730 &pdev->dev,
731 dma_unmap_addr(tx_buf, mapping),
732 skb_frag_size(&skb_shinfo(skb)->frags[j]),
733 DMA_TO_DEVICE);
734 }
735 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
736 if (bp->flags & BNXT_FLAG_CHIP_P5) {
737 if (!bnxt_get_tx_ts_p5(bp, skb))
738 compl_deferred = true;
739 else
740 atomic_inc(&bp->ptp_cfg->tx_avail);
741 }
742 }
743
744 next_tx_int:
745 cons = NEXT_TX(cons);
746
747 tx_bytes += skb->len;
748 if (!compl_deferred)
749 dev_kfree_skb_any(skb);
750 }
751
752 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
753 txr->tx_cons = cons;
754
755 /* Need to make the tx_cons update visible to bnxt_start_xmit()
756 * before checking for netif_tx_queue_stopped(). Without the
757 * memory barrier, there is a small possibility that bnxt_start_xmit()
758 * will miss it and cause the queue to be stopped forever.
759 */
760 smp_mb();
761
762 if (unlikely(netif_tx_queue_stopped(txq)) &&
763 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
764 READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
765 netif_tx_wake_queue(txq);
766 }
767
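/* Allocate an RX buffer page from the ring's page_pool and DMA-map it.
 * Used in page mode (e.g. when XDP is attached); returns NULL and
 * recycles the page on mapping failure.
 */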
768 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
769 struct bnxt_rx_ring_info *rxr,
770 gfp_t gfp)
771 {
772 struct device *dev = &bp->pdev->dev;
773 struct page *page;
774
775 page = page_pool_dev_alloc_pages(rxr->page_pool);
776 if (!page)
777 return NULL;
778
779 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
780 DMA_ATTR_WEAK_ORDERING);
781 if (dma_mapping_error(dev, *mapping)) {
782 page_pool_recycle_direct(rxr->page_pool, page);
783 return NULL;
784 }
785 *mapping += bp->rx_dma_offset;
786 return page;
787 }
788
789 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
790 gfp_t gfp)
791 {
792 u8 *data;
793 struct pci_dev *pdev = bp->pdev;
794
795 data = kmalloc(bp->rx_buf_size, gfp);
796 if (!data)
797 return NULL;
798
799 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
800 bp->rx_buf_use_size, bp->rx_dir,
801 DMA_ATTR_WEAK_ORDERING);
802
803 if (dma_mapping_error(&pdev->dev, *mapping)) {
804 kfree(data);
805 data = NULL;
806 }
807 return data;
808 }
809
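/* Fill the RX descriptor at index @prod with a fresh buffer: a
 * page_pool page in page mode, otherwise a kmalloc'ed data buffer, and
 * write its DMA address into the BD.
 */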
810 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
811 u16 prod, gfp_t gfp)
812 {
813 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
814 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
815 dma_addr_t mapping;
816
817 if (BNXT_RX_PAGE_MODE(bp)) {
818 struct page *page =
819 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
820
821 if (!page)
822 return -ENOMEM;
823
824 rx_buf->data = page;
825 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
826 } else {
827 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
828
829 if (!data)
830 return -ENOMEM;
831
832 rx_buf->data = data;
833 rx_buf->data_ptr = data + bp->rx_offset;
834 }
835 rx_buf->mapping = mapping;
836
837 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
838 return 0;
839 }
840
841 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
842 {
843 u16 prod = rxr->rx_prod;
844 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
845 struct rx_bd *cons_bd, *prod_bd;
846
847 prod_rx_buf = &rxr->rx_buf_ring[prod];
848 cons_rx_buf = &rxr->rx_buf_ring[cons];
849
850 prod_rx_buf->data = data;
851 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
852
853 prod_rx_buf->mapping = cons_rx_buf->mapping;
854
855 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
856 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
857
858 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
859 }
860
861 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
862 {
863 u16 next, max = rxr->rx_agg_bmap_size;
864
865 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
866 if (next >= max)
867 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
868 return next;
869 }
870
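/* Allocate and map a buffer for the RX aggregation ring. When the
 * system PAGE_SIZE is larger than BNXT_RX_PAGE_SIZE, one page is carved
 * into multiple BNXT_RX_PAGE_SIZE chunks, each holding its own page
 * reference.
 */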
871 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
872 struct bnxt_rx_ring_info *rxr,
873 u16 prod, gfp_t gfp)
874 {
875 struct rx_bd *rxbd =
876 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
877 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
878 struct pci_dev *pdev = bp->pdev;
879 struct page *page;
880 dma_addr_t mapping;
881 u16 sw_prod = rxr->rx_sw_agg_prod;
882 unsigned int offset = 0;
883
884 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
885 page = rxr->rx_page;
886 if (!page) {
887 page = alloc_page(gfp);
888 if (!page)
889 return -ENOMEM;
890 rxr->rx_page = page;
891 rxr->rx_page_offset = 0;
892 }
893 offset = rxr->rx_page_offset;
894 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
895 if (rxr->rx_page_offset == PAGE_SIZE)
896 rxr->rx_page = NULL;
897 else
898 get_page(page);
899 } else {
900 page = alloc_page(gfp);
901 if (!page)
902 return -ENOMEM;
903 }
904
905 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
906 BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
907 DMA_ATTR_WEAK_ORDERING);
908 if (dma_mapping_error(&pdev->dev, mapping)) {
909 __free_page(page);
910 return -EIO;
911 }
912
913 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
914 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
915
916 __set_bit(sw_prod, rxr->rx_agg_bmap);
917 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
918 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
919
920 rx_agg_buf->page = page;
921 rx_agg_buf->offset = offset;
922 rx_agg_buf->mapping = mapping;
923 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
924 rxbd->rx_bd_opaque = sw_prod;
925 return 0;
926 }
927
928 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
929 struct bnxt_cp_ring_info *cpr,
930 u16 cp_cons, u16 curr)
931 {
932 struct rx_agg_cmp *agg;
933
934 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
935 agg = (struct rx_agg_cmp *)
936 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
937 return agg;
938 }
939
940 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
941 struct bnxt_rx_ring_info *rxr,
942 u16 agg_id, u16 curr)
943 {
944 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
945
946 return &tpa_info->agg_arr[curr];
947 }
948
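/* Recycle @agg_bufs aggregation buffers (taken from the completion ring
 * or, for P5 TPA, from the per-aggregation array) back onto the agg
 * ring. Called on error/OOM paths so the hardware can reuse the pages.
 */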
949 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
950 u16 start, u32 agg_bufs, bool tpa)
951 {
952 struct bnxt_napi *bnapi = cpr->bnapi;
953 struct bnxt *bp = bnapi->bp;
954 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
955 u16 prod = rxr->rx_agg_prod;
956 u16 sw_prod = rxr->rx_sw_agg_prod;
957 bool p5_tpa = false;
958 u32 i;
959
960 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
961 p5_tpa = true;
962
963 for (i = 0; i < agg_bufs; i++) {
964 u16 cons;
965 struct rx_agg_cmp *agg;
966 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
967 struct rx_bd *prod_bd;
968 struct page *page;
969
970 if (p5_tpa)
971 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
972 else
973 agg = bnxt_get_agg(bp, cpr, idx, start + i);
974 cons = agg->rx_agg_cmp_opaque;
975 __clear_bit(cons, rxr->rx_agg_bmap);
976
977 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
978 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
979
980 __set_bit(sw_prod, rxr->rx_agg_bmap);
981 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
982 cons_rx_buf = &rxr->rx_agg_ring[cons];
983
984 /* It is possible for sw_prod to be equal to cons, so
985 * set cons_rx_buf->page to NULL first.
986 */
987 page = cons_rx_buf->page;
988 cons_rx_buf->page = NULL;
989 prod_rx_buf->page = page;
990 prod_rx_buf->offset = cons_rx_buf->offset;
991
992 prod_rx_buf->mapping = cons_rx_buf->mapping;
993
994 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
995
996 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
997 prod_bd->rx_bd_opaque = sw_prod;
998
999 prod = NEXT_RX_AGG(prod);
1000 sw_prod = NEXT_RX_AGG(sw_prod);
1001 }
1002 rxr->rx_agg_prod = prod;
1003 rxr->rx_sw_agg_prod = sw_prod;
1004 }
1005
1006 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1007 struct bnxt_rx_ring_info *rxr,
1008 u16 cons, void *data, u8 *data_ptr,
1009 dma_addr_t dma_addr,
1010 unsigned int offset_and_len)
1011 {
1012 unsigned int payload = offset_and_len >> 16;
1013 unsigned int len = offset_and_len & 0xffff;
1014 skb_frag_t *frag;
1015 struct page *page = data;
1016 u16 prod = rxr->rx_prod;
1017 struct sk_buff *skb;
1018 int off, err;
1019
1020 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1021 if (unlikely(err)) {
1022 bnxt_reuse_rx_data(rxr, cons, data);
1023 return NULL;
1024 }
1025 dma_addr -= bp->rx_dma_offset;
1026 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
1027 DMA_ATTR_WEAK_ORDERING);
1028 page_pool_release_page(rxr->page_pool, page);
1029
1030 if (unlikely(!payload))
1031 payload = eth_get_headlen(bp->dev, data_ptr, len);
1032
1033 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1034 if (!skb) {
1035 __free_page(page);
1036 return NULL;
1037 }
1038
1039 off = (void *)data_ptr - page_address(page);
1040 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
1041 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1042 payload + NET_IP_ALIGN);
1043
1044 frag = &skb_shinfo(skb)->frags[0];
1045 skb_frag_size_sub(frag, payload);
1046 skb_frag_off_add(frag, payload);
1047 skb->data_len -= payload;
1048 skb->tail += payload;
1049
1050 return skb;
1051 }
1052
1053 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1054 struct bnxt_rx_ring_info *rxr, u16 cons,
1055 void *data, u8 *data_ptr,
1056 dma_addr_t dma_addr,
1057 unsigned int offset_and_len)
1058 {
1059 u16 prod = rxr->rx_prod;
1060 struct sk_buff *skb;
1061 int err;
1062
1063 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1064 if (unlikely(err)) {
1065 bnxt_reuse_rx_data(rxr, cons, data);
1066 return NULL;
1067 }
1068
1069 skb = build_skb(data, 0);
1070 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1071 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1072 if (!skb) {
1073 kfree(data);
1074 return NULL;
1075 }
1076
1077 skb_reserve(skb, bp->rx_offset);
1078 skb_put(skb, offset_and_len & 0xffff);
1079 return skb;
1080 }
1081
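/* Attach @agg_bufs aggregation pages to @skb as page frags. If
 * replenishing the agg ring fails midway, the skb is freed and the
 * remaining buffers are recycled via bnxt_reuse_rx_agg_bufs().
 */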
1082 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1083 struct bnxt_cp_ring_info *cpr,
1084 struct sk_buff *skb, u16 idx,
1085 u32 agg_bufs, bool tpa)
1086 {
1087 struct bnxt_napi *bnapi = cpr->bnapi;
1088 struct pci_dev *pdev = bp->pdev;
1089 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1090 u16 prod = rxr->rx_agg_prod;
1091 bool p5_tpa = false;
1092 u32 i;
1093
1094 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1095 p5_tpa = true;
1096
1097 for (i = 0; i < agg_bufs; i++) {
1098 u16 cons, frag_len;
1099 struct rx_agg_cmp *agg;
1100 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1101 struct page *page;
1102 dma_addr_t mapping;
1103
1104 if (p5_tpa)
1105 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1106 else
1107 agg = bnxt_get_agg(bp, cpr, idx, i);
1108 cons = agg->rx_agg_cmp_opaque;
1109 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1110 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1111
1112 cons_rx_buf = &rxr->rx_agg_ring[cons];
1113 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1114 cons_rx_buf->offset, frag_len);
1115 __clear_bit(cons, rxr->rx_agg_bmap);
1116
1117 /* It is possible for bnxt_alloc_rx_page() to allocate
1118 * a sw_prod index that equals the cons index, so we
1119 * need to clear the cons entry now.
1120 */
1121 mapping = cons_rx_buf->mapping;
1122 page = cons_rx_buf->page;
1123 cons_rx_buf->page = NULL;
1124
1125 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1126 struct skb_shared_info *shinfo;
1127 unsigned int nr_frags;
1128
1129 shinfo = skb_shinfo(skb);
1130 nr_frags = --shinfo->nr_frags;
1131 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1132
1133 dev_kfree_skb(skb);
1134
1135 cons_rx_buf->page = page;
1136
1137 /* Update prod since possibly some pages have been
1138 * allocated already.
1139 */
1140 rxr->rx_agg_prod = prod;
1141 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1142 return NULL;
1143 }
1144
1145 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1146 DMA_FROM_DEVICE,
1147 DMA_ATTR_WEAK_ORDERING);
1148
1149 skb->data_len += frag_len;
1150 skb->len += frag_len;
1151 skb->truesize += PAGE_SIZE;
1152
1153 prod = NEXT_RX_AGG(prod);
1154 }
1155 rxr->rx_agg_prod = prod;
1156 return skb;
1157 }
1158
1159 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1160 u8 agg_bufs, u32 *raw_cons)
1161 {
1162 u16 last;
1163 struct rx_agg_cmp *agg;
1164
1165 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1166 last = RING_CMP(*raw_cons);
1167 agg = (struct rx_agg_cmp *)
1168 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1169 return RX_AGG_CMP_VALID(agg, *raw_cons);
1170 }
1171
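/* Copy-break path for small packets: allocate a new skb and memcpy the
 * frame out of the RX buffer so the original buffer can be reused in
 * place without remapping.
 */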
1172 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1173 unsigned int len,
1174 dma_addr_t mapping)
1175 {
1176 struct bnxt *bp = bnapi->bp;
1177 struct pci_dev *pdev = bp->pdev;
1178 struct sk_buff *skb;
1179
1180 skb = napi_alloc_skb(&bnapi->napi, len);
1181 if (!skb)
1182 return NULL;
1183
1184 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1185 bp->rx_dir);
1186
1187 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1188 len + NET_IP_ALIGN);
1189
1190 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1191 bp->rx_dir);
1192
1193 skb_put(skb, len);
1194 return skb;
1195 }
1196
1197 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1198 u32 *raw_cons, void *cmp)
1199 {
1200 struct rx_cmp *rxcmp = cmp;
1201 u32 tmp_raw_cons = *raw_cons;
1202 u8 cmp_type, agg_bufs = 0;
1203
1204 cmp_type = RX_CMP_TYPE(rxcmp);
1205
1206 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1207 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1208 RX_CMP_AGG_BUFS) >>
1209 RX_CMP_AGG_BUFS_SHIFT;
1210 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1211 struct rx_tpa_end_cmp *tpa_end = cmp;
1212
1213 if (bp->flags & BNXT_FLAG_CHIP_P5)
1214 return 0;
1215
1216 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1217 }
1218
1219 if (agg_bufs) {
1220 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1221 return -EBUSY;
1222 }
1223 *raw_cons = tmp_raw_cons;
1224 return 0;
1225 }
1226
1227 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1228 {
1229 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1230 return;
1231
1232 if (BNXT_PF(bp))
1233 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1234 else
1235 schedule_delayed_work(&bp->fw_reset_task, delay);
1236 }
1237
1238 static void bnxt_queue_sp_work(struct bnxt *bp)
1239 {
1240 if (BNXT_PF(bp))
1241 queue_work(bnxt_pf_wq, &bp->sp_task);
1242 else
1243 schedule_work(&bp->sp_task);
1244 }
1245
1246 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1247 {
1248 if (!rxr->bnapi->in_reset) {
1249 rxr->bnapi->in_reset = true;
1250 if (bp->flags & BNXT_FLAG_CHIP_P5)
1251 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1252 else
1253 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
1254 bnxt_queue_sp_work(bp);
1255 }
1256 rxr->rx_next_cons = 0xffff;
1257 }
1258
1259 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1260 {
1261 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1262 u16 idx = agg_id & MAX_TPA_P5_MASK;
1263
1264 if (test_bit(idx, map->agg_idx_bmap))
1265 idx = find_first_zero_bit(map->agg_idx_bmap,
1266 BNXT_AGG_IDX_BMAP_SIZE);
1267 __set_bit(idx, map->agg_idx_bmap);
1268 map->agg_id_tbl[agg_id] = idx;
1269 return idx;
1270 }
1271
1272 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1273 {
1274 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1275
1276 __clear_bit(idx, map->agg_idx_bmap);
1277 }
1278
1279 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1280 {
1281 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1282
1283 return map->agg_id_tbl[agg_id];
1284 }
1285
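/* Handle a TPA_START completion: stash the current RX buffer in
 * tpa_info for the duration of the aggregation and put the previously
 * saved buffer back on the RX ring so it stays fully populated, then
 * record the hash, metadata and header info needed at TPA_END time.
 */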
1286 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1287 struct rx_tpa_start_cmp *tpa_start,
1288 struct rx_tpa_start_cmp_ext *tpa_start1)
1289 {
1290 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1291 struct bnxt_tpa_info *tpa_info;
1292 u16 cons, prod, agg_id;
1293 struct rx_bd *prod_bd;
1294 dma_addr_t mapping;
1295
1296 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1297 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1298 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1299 } else {
1300 agg_id = TPA_START_AGG_ID(tpa_start);
1301 }
1302 cons = tpa_start->rx_tpa_start_cmp_opaque;
1303 prod = rxr->rx_prod;
1304 cons_rx_buf = &rxr->rx_buf_ring[cons];
1305 prod_rx_buf = &rxr->rx_buf_ring[prod];
1306 tpa_info = &rxr->rx_tpa[agg_id];
1307
1308 if (unlikely(cons != rxr->rx_next_cons ||
1309 TPA_START_ERROR(tpa_start))) {
1310 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1311 cons, rxr->rx_next_cons,
1312 TPA_START_ERROR_CODE(tpa_start1));
1313 bnxt_sched_reset(bp, rxr);
1314 return;
1315 }
1316 /* Store cfa_code in tpa_info to use in tpa_end
1317 * completion processing.
1318 */
1319 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1320 prod_rx_buf->data = tpa_info->data;
1321 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1322
1323 mapping = tpa_info->mapping;
1324 prod_rx_buf->mapping = mapping;
1325
1326 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1327
1328 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1329
1330 tpa_info->data = cons_rx_buf->data;
1331 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1332 cons_rx_buf->data = NULL;
1333 tpa_info->mapping = cons_rx_buf->mapping;
1334
1335 tpa_info->len =
1336 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1337 RX_TPA_START_CMP_LEN_SHIFT;
1338 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1339 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1340
1341 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1342 tpa_info->gso_type = SKB_GSO_TCPV4;
1343 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1344 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1345 tpa_info->gso_type = SKB_GSO_TCPV6;
1346 tpa_info->rss_hash =
1347 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1348 } else {
1349 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1350 tpa_info->gso_type = 0;
1351 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1352 }
1353 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1354 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1355 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1356 tpa_info->agg_count = 0;
1357
1358 rxr->rx_prod = NEXT_RX(prod);
1359 cons = NEXT_RX(cons);
1360 rxr->rx_next_cons = NEXT_RX(cons);
1361 cons_rx_buf = &rxr->rx_buf_ring[cons];
1362
1363 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1364 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1365 cons_rx_buf->data = NULL;
1366 }
1367
1368 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1369 {
1370 if (agg_bufs)
1371 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1372 }
1373
1374 #ifdef CONFIG_INET
1375 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1376 {
1377 struct udphdr *uh = NULL;
1378
1379 if (ip_proto == htons(ETH_P_IP)) {
1380 struct iphdr *iph = (struct iphdr *)skb->data;
1381
1382 if (iph->protocol == IPPROTO_UDP)
1383 uh = (struct udphdr *)(iph + 1);
1384 } else {
1385 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1386
1387 if (iph->nexthdr == IPPROTO_UDP)
1388 uh = (struct udphdr *)(iph + 1);
1389 }
1390 if (uh) {
1391 if (uh->check)
1392 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1393 else
1394 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1395 }
1396 }
1397 #endif
1398
1399 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1400 int payload_off, int tcp_ts,
1401 struct sk_buff *skb)
1402 {
1403 #ifdef CONFIG_INET
1404 struct tcphdr *th;
1405 int len, nw_off;
1406 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1407 u32 hdr_info = tpa_info->hdr_info;
1408 bool loopback = false;
1409
1410 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1411 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1412 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1413
1414 /* If the packet is an internal loopback packet, the offsets will
1415 * have an extra 4 bytes.
1416 */
1417 if (inner_mac_off == 4) {
1418 loopback = true;
1419 } else if (inner_mac_off > 4) {
1420 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1421 ETH_HLEN - 2));
1422
1423 /* We only support inner IPv4/IPv6. If we don't see the
1424 * correct protocol ID, it must be a loopback packet where
1425 * the offsets are off by 4.
1426 */
1427 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1428 loopback = true;
1429 }
1430 if (loopback) {
1431 /* internal loopback packet, subtract 4 from all offsets */
1432 inner_ip_off -= 4;
1433 inner_mac_off -= 4;
1434 outer_ip_off -= 4;
1435 }
1436
1437 nw_off = inner_ip_off - ETH_HLEN;
1438 skb_set_network_header(skb, nw_off);
1439 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1440 struct ipv6hdr *iph = ipv6_hdr(skb);
1441
1442 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1443 len = skb->len - skb_transport_offset(skb);
1444 th = tcp_hdr(skb);
1445 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1446 } else {
1447 struct iphdr *iph = ip_hdr(skb);
1448
1449 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1450 len = skb->len - skb_transport_offset(skb);
1451 th = tcp_hdr(skb);
1452 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1453 }
1454
1455 if (inner_mac_off) { /* tunnel */
1456 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1457 ETH_HLEN - 2));
1458
1459 bnxt_gro_tunnel(skb, proto);
1460 }
1461 #endif
1462 return skb;
1463 }
1464
1465 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1466 int payload_off, int tcp_ts,
1467 struct sk_buff *skb)
1468 {
1469 #ifdef CONFIG_INET
1470 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1471 u32 hdr_info = tpa_info->hdr_info;
1472 int iphdr_len, nw_off;
1473
1474 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1475 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1476 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1477
1478 nw_off = inner_ip_off - ETH_HLEN;
1479 skb_set_network_header(skb, nw_off);
1480 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1481 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1482 skb_set_transport_header(skb, nw_off + iphdr_len);
1483
1484 if (inner_mac_off) { /* tunnel */
1485 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1486 ETH_HLEN - 2));
1487
1488 bnxt_gro_tunnel(skb, proto);
1489 }
1490 #endif
1491 return skb;
1492 }
1493
1494 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1495 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1496
1497 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1498 int payload_off, int tcp_ts,
1499 struct sk_buff *skb)
1500 {
1501 #ifdef CONFIG_INET
1502 struct tcphdr *th;
1503 int len, nw_off, tcp_opt_len = 0;
1504
1505 if (tcp_ts)
1506 tcp_opt_len = 12;
1507
1508 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1509 struct iphdr *iph;
1510
1511 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1512 ETH_HLEN;
1513 skb_set_network_header(skb, nw_off);
1514 iph = ip_hdr(skb);
1515 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1516 len = skb->len - skb_transport_offset(skb);
1517 th = tcp_hdr(skb);
1518 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1519 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1520 struct ipv6hdr *iph;
1521
1522 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1523 ETH_HLEN;
1524 skb_set_network_header(skb, nw_off);
1525 iph = ipv6_hdr(skb);
1526 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1527 len = skb->len - skb_transport_offset(skb);
1528 th = tcp_hdr(skb);
1529 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1530 } else {
1531 dev_kfree_skb_any(skb);
1532 return NULL;
1533 }
1534
1535 if (nw_off) /* tunnel */
1536 bnxt_gro_tunnel(skb, skb->protocol);
1537 #endif
1538 return skb;
1539 }
1540
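/* Finish a coalesced TPA skb for GRO: set gso_size/gso_type from the
 * TPA_END completion, let the chip-specific ->gro_func fix up the
 * network/transport headers and TCP pseudo checksum, then call
 * tcp_gro_complete().
 */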
1541 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1542 struct bnxt_tpa_info *tpa_info,
1543 struct rx_tpa_end_cmp *tpa_end,
1544 struct rx_tpa_end_cmp_ext *tpa_end1,
1545 struct sk_buff *skb)
1546 {
1547 #ifdef CONFIG_INET
1548 int payload_off;
1549 u16 segs;
1550
1551 segs = TPA_END_TPA_SEGS(tpa_end);
1552 if (segs == 1)
1553 return skb;
1554
1555 NAPI_GRO_CB(skb)->count = segs;
1556 skb_shinfo(skb)->gso_size =
1557 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1558 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1559 if (bp->flags & BNXT_FLAG_CHIP_P5)
1560 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1561 else
1562 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1563 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1564 if (likely(skb))
1565 tcp_gro_complete(skb);
1566 #endif
1567 return skb;
1568 }
1569
1570 /* Given the cfa_code of a received packet, determine which
1571 * netdev (vf-rep or PF) the packet is destined to.
1572 */
1573 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1574 {
1575 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1576
1577 /* if vf-rep dev is NULL, the packet must belong to the PF */
1578 return dev ? dev : bp->dev;
1579 }
1580
1581 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1582 struct bnxt_cp_ring_info *cpr,
1583 u32 *raw_cons,
1584 struct rx_tpa_end_cmp *tpa_end,
1585 struct rx_tpa_end_cmp_ext *tpa_end1,
1586 u8 *event)
1587 {
1588 struct bnxt_napi *bnapi = cpr->bnapi;
1589 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1590 u8 *data_ptr, agg_bufs;
1591 unsigned int len;
1592 struct bnxt_tpa_info *tpa_info;
1593 dma_addr_t mapping;
1594 struct sk_buff *skb;
1595 u16 idx = 0, agg_id;
1596 void *data;
1597 bool gro;
1598
1599 if (unlikely(bnapi->in_reset)) {
1600 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1601
1602 if (rc < 0)
1603 return ERR_PTR(-EBUSY);
1604 return NULL;
1605 }
1606
1607 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1608 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1609 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1610 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1611 tpa_info = &rxr->rx_tpa[agg_id];
1612 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1613 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1614 agg_bufs, tpa_info->agg_count);
1615 agg_bufs = tpa_info->agg_count;
1616 }
1617 tpa_info->agg_count = 0;
1618 *event |= BNXT_AGG_EVENT;
1619 bnxt_free_agg_idx(rxr, agg_id);
1620 idx = agg_id;
1621 gro = !!(bp->flags & BNXT_FLAG_GRO);
1622 } else {
1623 agg_id = TPA_END_AGG_ID(tpa_end);
1624 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1625 tpa_info = &rxr->rx_tpa[agg_id];
1626 idx = RING_CMP(*raw_cons);
1627 if (agg_bufs) {
1628 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1629 return ERR_PTR(-EBUSY);
1630
1631 *event |= BNXT_AGG_EVENT;
1632 idx = NEXT_CMP(idx);
1633 }
1634 gro = !!TPA_END_GRO(tpa_end);
1635 }
1636 data = tpa_info->data;
1637 data_ptr = tpa_info->data_ptr;
1638 prefetch(data_ptr);
1639 len = tpa_info->len;
1640 mapping = tpa_info->mapping;
1641
1642 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1643 bnxt_abort_tpa(cpr, idx, agg_bufs);
1644 if (agg_bufs > MAX_SKB_FRAGS)
1645 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1646 agg_bufs, (int)MAX_SKB_FRAGS);
1647 return NULL;
1648 }
1649
1650 if (len <= bp->rx_copy_thresh) {
1651 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1652 if (!skb) {
1653 bnxt_abort_tpa(cpr, idx, agg_bufs);
1654 cpr->sw_stats.rx.rx_oom_discards += 1;
1655 return NULL;
1656 }
1657 } else {
1658 u8 *new_data;
1659 dma_addr_t new_mapping;
1660
1661 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1662 if (!new_data) {
1663 bnxt_abort_tpa(cpr, idx, agg_bufs);
1664 cpr->sw_stats.rx.rx_oom_discards += 1;
1665 return NULL;
1666 }
1667
1668 tpa_info->data = new_data;
1669 tpa_info->data_ptr = new_data + bp->rx_offset;
1670 tpa_info->mapping = new_mapping;
1671
1672 skb = build_skb(data, 0);
1673 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1674 bp->rx_buf_use_size, bp->rx_dir,
1675 DMA_ATTR_WEAK_ORDERING);
1676
1677 if (!skb) {
1678 kfree(data);
1679 bnxt_abort_tpa(cpr, idx, agg_bufs);
1680 cpr->sw_stats.rx.rx_oom_discards += 1;
1681 return NULL;
1682 }
1683 skb_reserve(skb, bp->rx_offset);
1684 skb_put(skb, len);
1685 }
1686
1687 if (agg_bufs) {
1688 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1689 if (!skb) {
1690 /* Page reuse already handled by bnxt_rx_pages(). */
1691 cpr->sw_stats.rx.rx_oom_discards += 1;
1692 return NULL;
1693 }
1694 }
1695
1696 skb->protocol =
1697 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1698
1699 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1700 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1701
1702 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1703 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1704 __be16 vlan_proto = htons(tpa_info->metadata >>
1705 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1706 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1707
1708 if (eth_type_vlan(vlan_proto)) {
1709 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1710 } else {
1711 dev_kfree_skb(skb);
1712 return NULL;
1713 }
1714 }
1715
1716 skb_checksum_none_assert(skb);
1717 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1718 skb->ip_summed = CHECKSUM_UNNECESSARY;
1719 skb->csum_level =
1720 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1721 }
1722
1723 if (gro)
1724 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1725
1726 return skb;
1727 }
1728
1729 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1730 struct rx_agg_cmp *rx_agg)
1731 {
1732 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1733 struct bnxt_tpa_info *tpa_info;
1734
1735 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1736 tpa_info = &rxr->rx_tpa[agg_id];
1737 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1738 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1739 }
1740
1741 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1742 struct sk_buff *skb)
1743 {
1744 if (skb->dev != bp->dev) {
1745 /* this packet belongs to a vf-rep */
1746 bnxt_vf_rep_rx(bp, skb);
1747 return;
1748 }
1749 skb_record_rx_queue(skb, bnapi->index);
1750 napi_gro_receive(&bnapi->napi, skb);
1751 }
1752
1753 /* returns the following:
1754 * 1 - 1 packet successfully received
1755 * 0 - successful TPA_START, packet not completed yet
1756 * -EBUSY - completion ring does not have all the agg buffers yet
1757 * -ENOMEM - packet aborted due to out of memory
1758 * -EIO - packet aborted due to hw error indicated in BD
1759 */
1760 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1761 u32 *raw_cons, u8 *event)
1762 {
1763 struct bnxt_napi *bnapi = cpr->bnapi;
1764 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1765 struct net_device *dev = bp->dev;
1766 struct rx_cmp *rxcmp;
1767 struct rx_cmp_ext *rxcmp1;
1768 u32 tmp_raw_cons = *raw_cons;
1769 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1770 struct bnxt_sw_rx_bd *rx_buf;
1771 unsigned int len;
1772 u8 *data_ptr, agg_bufs, cmp_type;
1773 dma_addr_t dma_addr;
1774 struct sk_buff *skb;
1775 u32 flags, misc;
1776 void *data;
1777 int rc = 0;
1778
1779 rxcmp = (struct rx_cmp *)
1780 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1781
1782 cmp_type = RX_CMP_TYPE(rxcmp);
1783
1784 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1785 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1786 goto next_rx_no_prod_no_len;
1787 }
1788
1789 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1790 cp_cons = RING_CMP(tmp_raw_cons);
1791 rxcmp1 = (struct rx_cmp_ext *)
1792 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1793
1794 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1795 return -EBUSY;
1796
1797 /* The valid test of the entry must be done first before
1798 * reading any further.
1799 */
1800 dma_rmb();
1801 prod = rxr->rx_prod;
1802
1803 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1804 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1805 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1806
1807 *event |= BNXT_RX_EVENT;
1808 goto next_rx_no_prod_no_len;
1809
1810 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1811 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1812 (struct rx_tpa_end_cmp *)rxcmp,
1813 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1814
1815 if (IS_ERR(skb))
1816 return -EBUSY;
1817
1818 rc = -ENOMEM;
1819 if (likely(skb)) {
1820 bnxt_deliver_skb(bp, bnapi, skb);
1821 rc = 1;
1822 }
1823 *event |= BNXT_RX_EVENT;
1824 goto next_rx_no_prod_no_len;
1825 }
1826
1827 cons = rxcmp->rx_cmp_opaque;
1828 if (unlikely(cons != rxr->rx_next_cons)) {
1829 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1830
1831 /* 0xffff is forced error, don't print it */
1832 if (rxr->rx_next_cons != 0xffff)
1833 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1834 cons, rxr->rx_next_cons);
1835 bnxt_sched_reset(bp, rxr);
1836 if (rc1)
1837 return rc1;
1838 goto next_rx_no_prod_no_len;
1839 }
1840 rx_buf = &rxr->rx_buf_ring[cons];
1841 data = rx_buf->data;
1842 data_ptr = rx_buf->data_ptr;
1843 prefetch(data_ptr);
1844
1845 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1846 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1847
1848 if (agg_bufs) {
1849 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1850 return -EBUSY;
1851
1852 cp_cons = NEXT_CMP(cp_cons);
1853 *event |= BNXT_AGG_EVENT;
1854 }
1855 *event |= BNXT_RX_EVENT;
1856
1857 rx_buf->data = NULL;
1858 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1859 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1860
1861 bnxt_reuse_rx_data(rxr, cons, data);
1862 if (agg_bufs)
1863 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1864 false);
1865
1866 rc = -EIO;
1867 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1868 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1869 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1870 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1871 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1872 rx_err);
1873 bnxt_sched_reset(bp, rxr);
1874 }
1875 }
1876 goto next_rx_no_len;
1877 }
1878
1879 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1880 len = flags >> RX_CMP_LEN_SHIFT;
1881 dma_addr = rx_buf->mapping;
1882
1883 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1884 rc = 1;
1885 goto next_rx;
1886 }
1887
1888 if (len <= bp->rx_copy_thresh) {
1889 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1890 bnxt_reuse_rx_data(rxr, cons, data);
1891 if (!skb) {
1892 if (agg_bufs)
1893 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1894 agg_bufs, false);
1895 cpr->sw_stats.rx.rx_oom_discards += 1;
1896 rc = -ENOMEM;
1897 goto next_rx;
1898 }
1899 } else {
1900 u32 payload;
1901
1902 if (rx_buf->data_ptr == data_ptr)
1903 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1904 else
1905 payload = 0;
1906 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1907 payload | len);
1908 if (!skb) {
1909 cpr->sw_stats.rx.rx_oom_discards += 1;
1910 rc = -ENOMEM;
1911 goto next_rx;
1912 }
1913 }
1914
1915 if (agg_bufs) {
1916 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1917 if (!skb) {
1918 cpr->sw_stats.rx.rx_oom_discards += 1;
1919 rc = -ENOMEM;
1920 goto next_rx;
1921 }
1922 }
1923
1924 if (RX_CMP_HASH_VALID(rxcmp)) {
1925 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1926 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1927
1928 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1929 if (hash_type != 1 && hash_type != 3)
1930 type = PKT_HASH_TYPE_L3;
1931 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1932 }
1933
1934 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1935 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1936
1937 if ((rxcmp1->rx_cmp_flags2 &
1938 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1939 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1940 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1941 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1942 __be16 vlan_proto = htons(meta_data >>
1943 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1944
1945 if (eth_type_vlan(vlan_proto)) {
1946 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1947 } else {
1948 dev_kfree_skb(skb);
1949 goto next_rx;
1950 }
1951 }
1952
1953 skb_checksum_none_assert(skb);
1954 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1955 if (dev->features & NETIF_F_RXCSUM) {
1956 skb->ip_summed = CHECKSUM_UNNECESSARY;
1957 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1958 }
1959 } else {
1960 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1961 if (dev->features & NETIF_F_RXCSUM)
1962 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1963 }
1964 }
1965
1966 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1967 RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1968 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1969 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1970 u64 ns, ts;
1971
1972 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1973 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1974
1975 spin_lock_bh(&ptp->ptp_lock);
1976 ns = timecounter_cyc2time(&ptp->tc, ts);
1977 spin_unlock_bh(&ptp->ptp_lock);
1978 memset(skb_hwtstamps(skb), 0,
1979 sizeof(*skb_hwtstamps(skb)));
1980 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1981 }
1982 }
1983 }
1984 bnxt_deliver_skb(bp, bnapi, skb);
1985 rc = 1;
1986
1987 next_rx:
1988 cpr->rx_packets += 1;
1989 cpr->rx_bytes += len;
1990
1991 next_rx_no_len:
1992 rxr->rx_prod = NEXT_RX(prod);
1993 rxr->rx_next_cons = NEXT_RX(cons);
1994
1995 next_rx_no_prod_no_len:
1996 *raw_cons = tmp_raw_cons;
1997
1998 return rc;
1999 }
2000
2001 /* In netpoll mode, if we are using a combined completion ring, we need to
2002 * discard the rx packets and recycle the buffers.
2003 */
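/* This works by setting an error bit in the completion before handing it
 * to bnxt_rx_pkt(): the forced error makes bnxt_rx_pkt() drop the packet
 * and recycle its rx/aggregation buffers instead of delivering an skb.
 */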
2004 static int bnxt_force_rx_discard(struct bnxt *bp,
2005 struct bnxt_cp_ring_info *cpr,
2006 u32 *raw_cons, u8 *event)
2007 {
2008 u32 tmp_raw_cons = *raw_cons;
2009 struct rx_cmp_ext *rxcmp1;
2010 struct rx_cmp *rxcmp;
2011 u16 cp_cons;
2012 u8 cmp_type;
2013 int rc;
2014
2015 cp_cons = RING_CMP(tmp_raw_cons);
2016 rxcmp = (struct rx_cmp *)
2017 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2018
2019 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2020 cp_cons = RING_CMP(tmp_raw_cons);
2021 rxcmp1 = (struct rx_cmp_ext *)
2022 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2023
2024 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2025 return -EBUSY;
2026
2027 /* The valid test of the entry must be done first before
2028 * reading any further.
2029 */
2030 dma_rmb();
2031 cmp_type = RX_CMP_TYPE(rxcmp);
2032 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2033 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2034 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2035 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2036 struct rx_tpa_end_cmp_ext *tpa_end1;
2037
2038 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2039 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2040 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2041 }
2042 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2043 if (rc && rc != -EBUSY)
2044 cpr->sw_stats.rx.rx_netpoll_discards += 1;
2045 return rc;
2046 }
2047
2048 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2049 {
2050 struct bnxt_fw_health *fw_health = bp->fw_health;
2051 u32 reg = fw_health->regs[reg_idx];
2052 u32 reg_type, reg_off, val = 0;
2053
2054 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2055 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2056 switch (reg_type) {
2057 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2058 pci_read_config_dword(bp->pdev, reg_off, &val);
2059 break;
2060 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2061 reg_off = fw_health->mapped_regs[reg_idx];
2062 fallthrough;
2063 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2064 val = readl(bp->bar0 + reg_off);
2065 break;
2066 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2067 val = readl(bp->bar1 + reg_off);
2068 break;
2069 }
2070 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2071 val &= fw_health->fw_reset_inprog_reg_mask;
2072 return val;
2073 }
2074
2075 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2076 {
2077 int i;
2078
2079 for (i = 0; i < bp->rx_nr_rings; i++) {
2080 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2081 struct bnxt_ring_grp_info *grp_info;
2082
2083 grp_info = &bp->grp_info[grp_idx];
2084 if (grp_info->agg_fw_ring_id == ring_id)
2085 return grp_idx;
2086 }
2087 return INVALID_HW_RING_ID;
2088 }
2089
2090 static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2091 {
2092 switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
2093 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2094 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2095 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2096 break;
2097 default:
2098 netdev_err(bp->dev, "FW reported unknown error type\n");
2099 break;
2100 }
2101 }
2102
2103 #define BNXT_GET_EVENT_PORT(data) \
2104 ((data) & \
2105 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2106
2107 #define BNXT_EVENT_RING_TYPE(data2) \
2108 ((data2) & \
2109 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2110
2111 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2112 (BNXT_EVENT_RING_TYPE(data2) == \
2113 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2114
2115 static int bnxt_async_event_process(struct bnxt *bp,
2116 struct hwrm_async_event_cmpl *cmpl)
2117 {
2118 u16 event_id = le16_to_cpu(cmpl->event_id);
2119 u32 data1 = le32_to_cpu(cmpl->event_data1);
2120 u32 data2 = le32_to_cpu(cmpl->event_data2);
2121
2122 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2123 switch (event_id) {
2124 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2125 struct bnxt_link_info *link_info = &bp->link_info;
2126
2127 if (BNXT_VF(bp))
2128 goto async_event_process_exit;
2129
2130 /* print unsupported speed warning in forced speed mode only */
2131 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2132 (data1 & 0x20000)) {
2133 u16 fw_speed = link_info->force_link_speed;
2134 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2135
2136 if (speed != SPEED_UNKNOWN)
2137 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2138 speed);
2139 }
2140 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2141 }
2142 fallthrough;
2143 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2144 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2145 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2146 fallthrough;
2147 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2148 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2149 break;
2150 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2151 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2152 break;
2153 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2154 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2155
2156 if (BNXT_VF(bp))
2157 break;
2158
2159 if (bp->pf.port_id != port_id)
2160 break;
2161
2162 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2163 break;
2164 }
2165 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2166 if (BNXT_PF(bp))
2167 goto async_event_process_exit;
2168 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2169 break;
2170 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2171 char *fatal_str = "non-fatal";
2172
2173 if (!bp->fw_health)
2174 goto async_event_process_exit;
2175
2176 bp->fw_reset_timestamp = jiffies;
2177 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2178 if (!bp->fw_reset_min_dsecs)
2179 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2180 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2181 if (!bp->fw_reset_max_dsecs)
2182 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2183 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2184 fatal_str = "fatal";
2185 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2186 }
2187 netif_warn(bp, hw, bp->dev,
2188 "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2189 fatal_str, data1, data2,
2190 bp->fw_reset_min_dsecs * 100,
2191 bp->fw_reset_max_dsecs * 100);
2192 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2193 break;
2194 }
2195 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2196 struct bnxt_fw_health *fw_health = bp->fw_health;
2197
2198 if (!fw_health)
2199 goto async_event_process_exit;
2200
2201 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2202 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2203 if (!fw_health->enabled) {
2204 netif_info(bp, drv, bp->dev,
2205 "Error recovery info: error recovery[0]\n");
2206 break;
2207 }
2208 fw_health->tmr_multiplier =
2209 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2210 bp->current_interval * 10);
2211 fw_health->tmr_counter = fw_health->tmr_multiplier;
2212 fw_health->last_fw_heartbeat =
2213 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2214 fw_health->last_fw_reset_cnt =
2215 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2216 netif_info(bp, drv, bp->dev,
2217 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2218 fw_health->master, fw_health->last_fw_reset_cnt,
2219 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2220 goto async_event_process_exit;
2221 }
2222 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2223 netif_notice(bp, hw, bp->dev,
2224 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2225 data1, data2);
2226 goto async_event_process_exit;
2227 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2228 struct bnxt_rx_ring_info *rxr;
2229 u16 grp_idx;
2230
2231 if (bp->flags & BNXT_FLAG_CHIP_P5)
2232 goto async_event_process_exit;
2233
2234 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2235 BNXT_EVENT_RING_TYPE(data2), data1);
2236 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2237 goto async_event_process_exit;
2238
2239 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2240 if (grp_idx == INVALID_HW_RING_ID) {
2241 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2242 data1);
2243 goto async_event_process_exit;
2244 }
2245 rxr = bp->bnapi[grp_idx]->rx_ring;
2246 bnxt_sched_reset(bp, rxr);
2247 goto async_event_process_exit;
2248 }
2249 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2250 struct bnxt_fw_health *fw_health = bp->fw_health;
2251
2252 netif_notice(bp, hw, bp->dev,
2253 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2254 data1, data2);
2255 if (fw_health) {
2256 fw_health->echo_req_data1 = data1;
2257 fw_health->echo_req_data2 = data2;
2258 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2259 break;
2260 }
2261 goto async_event_process_exit;
2262 }
2263 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2264 bnxt_ptp_pps_event(bp, data1, data2);
2265 goto async_event_process_exit;
2266 }
2267 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2268 bnxt_event_error_report(bp, data1, data2);
2269 goto async_event_process_exit;
2270 }
2271 default:
2272 goto async_event_process_exit;
2273 }
2274 bnxt_queue_sp_work(bp);
2275 async_event_process_exit:
2276 bnxt_ulp_async_events(bp, cmpl);
2277 return 0;
2278 }
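/* Most cases above only set a bit in bp->sp_event and let the driver's
 * slow-path worker (bnxt_sp_task(), queued via bnxt_queue_sp_work()) do
 * the actual work; cases that jump to async_event_process_exit are
 * handled inline or ignored.  Every event is also forwarded to the ULP
 * (e.g. RDMA) driver through bnxt_ulp_async_events().
 */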
2279
2280 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2281 {
2282 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2283 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2284 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2285 (struct hwrm_fwd_req_cmpl *)txcmp;
2286
2287 switch (cmpl_type) {
2288 case CMPL_BASE_TYPE_HWRM_DONE:
2289 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2290 if (seq_id == bp->hwrm_intr_seq_id)
2291 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2292 else
2293 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2294 break;
2295
2296 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2297 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2298
2299 if ((vf_id < bp->pf.first_vf_id) ||
2300 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2301 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2302 vf_id);
2303 return -EINVAL;
2304 }
2305
2306 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2307 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2308 bnxt_queue_sp_work(bp);
2309 break;
2310
2311 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2312 bnxt_async_event_process(bp,
2313 (struct hwrm_async_event_cmpl *)txcmp);
2314 break;
2315
2316 default:
2317 break;
2318 }
2319
2320 return 0;
2321 }
2322
2323 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2324 {
2325 struct bnxt_napi *bnapi = dev_instance;
2326 struct bnxt *bp = bnapi->bp;
2327 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2328 u32 cons = RING_CMP(cpr->cp_raw_cons);
2329
2330 cpr->event_ctr++;
2331 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2332 napi_schedule(&bnapi->napi);
2333 return IRQ_HANDLED;
2334 }
2335
2336 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2337 {
2338 u32 raw_cons = cpr->cp_raw_cons;
2339 u16 cons = RING_CMP(raw_cons);
2340 struct tx_cmp *txcmp;
2341
2342 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2343
2344 return TX_CMP_VALID(txcmp, raw_cons);
2345 }
2346
2347 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2348 {
2349 struct bnxt_napi *bnapi = dev_instance;
2350 struct bnxt *bp = bnapi->bp;
2351 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2352 u32 cons = RING_CMP(cpr->cp_raw_cons);
2353 u32 int_status;
2354
2355 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2356
2357 if (!bnxt_has_work(bp, cpr)) {
2358 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2359 /* return if erroneous interrupt */
2360 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2361 return IRQ_NONE;
2362 }
2363
2364 /* disable ring IRQ */
2365 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2366
2367 /* Return here if interrupt is shared and is disabled. */
2368 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2369 return IRQ_HANDLED;
2370
2371 napi_schedule(&bnapi->napi);
2372 return IRQ_HANDLED;
2373 }
2374
2375 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2376 int budget)
2377 {
2378 struct bnxt_napi *bnapi = cpr->bnapi;
2379 u32 raw_cons = cpr->cp_raw_cons;
2380 u32 cons;
2381 int tx_pkts = 0;
2382 int rx_pkts = 0;
2383 u8 event = 0;
2384 struct tx_cmp *txcmp;
2385
2386 cpr->has_more_work = 0;
2387 cpr->had_work_done = 1;
2388 while (1) {
2389 int rc;
2390
2391 cons = RING_CMP(raw_cons);
2392 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2393
2394 if (!TX_CMP_VALID(txcmp, raw_cons))
2395 break;
2396
2397 /* The valid test of the entry must be done first before
2398 * reading any further.
2399 */
2400 dma_rmb();
2401 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2402 tx_pkts++;
2403 /* return full budget so NAPI will complete. */
2404 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2405 rx_pkts = budget;
2406 raw_cons = NEXT_RAW_CMP(raw_cons);
2407 if (budget)
2408 cpr->has_more_work = 1;
2409 break;
2410 }
2411 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2412 if (likely(budget))
2413 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2414 else
2415 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2416 &event);
2417 if (likely(rc >= 0))
2418 rx_pkts += rc;
2419 /* Increment rx_pkts when rc is -ENOMEM to count towards
2420 * the NAPI budget. Otherwise, we may potentially loop
2421 * here forever if we consistently cannot allocate
2422 * buffers.
2423 */
2424 else if (rc == -ENOMEM && budget)
2425 rx_pkts++;
2426 else if (rc == -EBUSY) /* partial completion */
2427 break;
2428 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2429 CMPL_BASE_TYPE_HWRM_DONE) ||
2430 (TX_CMP_TYPE(txcmp) ==
2431 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2432 (TX_CMP_TYPE(txcmp) ==
2433 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2434 bnxt_hwrm_handler(bp, txcmp);
2435 }
2436 raw_cons = NEXT_RAW_CMP(raw_cons);
2437
2438 if (rx_pkts && rx_pkts == budget) {
2439 cpr->has_more_work = 1;
2440 break;
2441 }
2442 }
2443
2444 if (event & BNXT_REDIRECT_EVENT)
2445 xdp_do_flush_map();
2446
2447 if (event & BNXT_TX_EVENT) {
2448 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2449 u16 prod = txr->tx_prod;
2450
2451 /* Sync BD data before updating doorbell */
2452 wmb();
2453
2454 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2455 }
2456
2457 cpr->cp_raw_cons = raw_cons;
2458 bnapi->tx_pkts += tx_pkts;
2459 bnapi->events |= event;
2460 return rx_pkts;
2461 }
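/* TX, RX, TPA and HWRM completions all share this completion ring and
 * are dispatched on the entry type above.  Only the TX doorbell is
 * written here (when BNXT_TX_EVENT is set); TX reclaim and the RX/AGG
 * ring doorbells are deferred to __bnxt_poll_work_done().
 */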
2462
2463 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2464 {
2465 if (bnapi->tx_pkts) {
2466 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2467 bnapi->tx_pkts = 0;
2468 }
2469
2470 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2471 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2472
2473 if (bnapi->events & BNXT_AGG_EVENT)
2474 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2475 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2476 }
2477 bnapi->events = 0;
2478 }
2479
2480 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2481 int budget)
2482 {
2483 struct bnxt_napi *bnapi = cpr->bnapi;
2484 int rx_pkts;
2485
2486 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2487
2488 /* ACK completion ring before freeing tx ring and producing new
2489 * buffers in rx/agg rings to prevent overflowing the completion
2490 * ring.
2491 */
2492 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2493
2494 __bnxt_poll_work_done(bp, bnapi);
2495 return rx_pkts;
2496 }
2497
2498 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2499 {
2500 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2501 struct bnxt *bp = bnapi->bp;
2502 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2503 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2504 struct tx_cmp *txcmp;
2505 struct rx_cmp_ext *rxcmp1;
2506 u32 cp_cons, tmp_raw_cons;
2507 u32 raw_cons = cpr->cp_raw_cons;
2508 u32 rx_pkts = 0;
2509 u8 event = 0;
2510
2511 while (1) {
2512 int rc;
2513
2514 cp_cons = RING_CMP(raw_cons);
2515 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2516
2517 if (!TX_CMP_VALID(txcmp, raw_cons))
2518 break;
2519
2520 /* The valid test of the entry must be done first before
2521 * reading any further.
2522 */
2523 dma_rmb();
2524 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2525 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2526 cp_cons = RING_CMP(tmp_raw_cons);
2527 rxcmp1 = (struct rx_cmp_ext *)
2528 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2529
2530 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2531 break;
2532
2533 /* force an error to recycle the buffer */
2534 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2535 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2536
2537 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2538 if (likely(rc == -EIO) && budget)
2539 rx_pkts++;
2540 else if (rc == -EBUSY) /* partial completion */
2541 break;
2542 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2543 CMPL_BASE_TYPE_HWRM_DONE)) {
2544 bnxt_hwrm_handler(bp, txcmp);
2545 } else {
2546 netdev_err(bp->dev,
2547 "Invalid completion received on special ring\n");
2548 }
2549 raw_cons = NEXT_RAW_CMP(raw_cons);
2550
2551 if (rx_pkts == budget)
2552 break;
2553 }
2554
2555 cpr->cp_raw_cons = raw_cons;
2556 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2557 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2558
2559 if (event & BNXT_AGG_EVENT)
2560 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2561
2562 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2563 napi_complete_done(napi, rx_pkts);
2564 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2565 }
2566 return rx_pkts;
2567 }
2568
2569 static int bnxt_poll(struct napi_struct *napi, int budget)
2570 {
2571 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2572 struct bnxt *bp = bnapi->bp;
2573 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2574 int work_done = 0;
2575
2576 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2577 napi_complete(napi);
2578 return 0;
2579 }
2580 while (1) {
2581 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2582
2583 if (work_done >= budget) {
2584 if (!budget)
2585 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2586 break;
2587 }
2588
2589 if (!bnxt_has_work(bp, cpr)) {
2590 if (napi_complete_done(napi, work_done))
2591 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2592 break;
2593 }
2594 }
2595 if (bp->flags & BNXT_FLAG_DIM) {
2596 struct dim_sample dim_sample = {};
2597
2598 dim_update_sample(cpr->event_ctr,
2599 cpr->rx_packets,
2600 cpr->rx_bytes,
2601 &dim_sample);
2602 net_dim(&cpr->dim, dim_sample);
2603 }
2604 return work_done;
2605 }
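/* With BNXT_FLAG_DIM set, every poll feeds the interrupt, packet and
 * byte counters to the net_dim library, which can later adjust this
 * ring's interrupt coalescing based on the observed load.
 */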
2606
2607 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2608 {
2609 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2610 int i, work_done = 0;
2611
2612 for (i = 0; i < 2; i++) {
2613 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2614
2615 if (cpr2) {
2616 work_done += __bnxt_poll_work(bp, cpr2,
2617 budget - work_done);
2618 cpr->has_more_work |= cpr2->has_more_work;
2619 }
2620 }
2621 return work_done;
2622 }
2623
2624 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2625 u64 dbr_type)
2626 {
2627 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2628 int i;
2629
2630 for (i = 0; i < 2; i++) {
2631 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2632 struct bnxt_db_info *db;
2633
2634 if (cpr2 && cpr2->had_work_done) {
2635 db = &cpr2->cp_db;
2636 writeq(db->db_key64 | dbr_type |
2637 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2638 cpr2->had_work_done = 0;
2639 }
2640 }
2641 __bnxt_poll_work_done(bp, bnapi);
2642 }
2643
2644 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2645 {
2646 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2647 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2648 u32 raw_cons = cpr->cp_raw_cons;
2649 struct bnxt *bp = bnapi->bp;
2650 struct nqe_cn *nqcmp;
2651 int work_done = 0;
2652 u32 cons;
2653
2654 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2655 napi_complete(napi);
2656 return 0;
2657 }
2658 if (cpr->has_more_work) {
2659 cpr->has_more_work = 0;
2660 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2661 }
2662 while (1) {
2663 cons = RING_CMP(raw_cons);
2664 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2665
2666 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2667 if (cpr->has_more_work)
2668 break;
2669
2670 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2671 cpr->cp_raw_cons = raw_cons;
2672 if (napi_complete_done(napi, work_done))
2673 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2674 cpr->cp_raw_cons);
2675 return work_done;
2676 }
2677
2678 /* The valid test of the entry must be done first before
2679 * reading any further.
2680 */
2681 dma_rmb();
2682
2683 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2684 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2685 struct bnxt_cp_ring_info *cpr2;
2686
2687 cpr2 = cpr->cp_ring_arr[idx];
2688 work_done += __bnxt_poll_work(bp, cpr2,
2689 budget - work_done);
2690 cpr->has_more_work |= cpr2->has_more_work;
2691 } else {
2692 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2693 }
2694 raw_cons = NEXT_RAW_CMP(raw_cons);
2695 }
2696 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2697 if (raw_cons != cpr->cp_raw_cons) {
2698 cpr->cp_raw_cons = raw_cons;
2699 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2700 }
2701 return work_done;
2702 }
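/* On P5 chips the NAPI instance polls a notification queue (NQ); each
 * CQ_NOTIFICATION entry selects one of the (up to two) completion rings
 * in cpr->cp_ring_arr, which is then drained with __bnxt_poll_work().
 */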
2703
2704 static void bnxt_free_tx_skbs(struct bnxt *bp)
2705 {
2706 int i, max_idx;
2707 struct pci_dev *pdev = bp->pdev;
2708
2709 if (!bp->tx_ring)
2710 return;
2711
2712 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2713 for (i = 0; i < bp->tx_nr_rings; i++) {
2714 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2715 int j;
2716
2717 for (j = 0; j < max_idx;) {
2718 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2719 struct sk_buff *skb;
2720 int k, last;
2721
2722 if (i < bp->tx_nr_rings_xdp &&
2723 tx_buf->action == XDP_REDIRECT) {
2724 dma_unmap_single(&pdev->dev,
2725 dma_unmap_addr(tx_buf, mapping),
2726 dma_unmap_len(tx_buf, len),
2727 DMA_TO_DEVICE);
2728 xdp_return_frame(tx_buf->xdpf);
2729 tx_buf->action = 0;
2730 tx_buf->xdpf = NULL;
2731 j++;
2732 continue;
2733 }
2734
2735 skb = tx_buf->skb;
2736 if (!skb) {
2737 j++;
2738 continue;
2739 }
2740
2741 tx_buf->skb = NULL;
2742
2743 if (tx_buf->is_push) {
2744 dev_kfree_skb(skb);
2745 j += 2;
2746 continue;
2747 }
2748
2749 dma_unmap_single(&pdev->dev,
2750 dma_unmap_addr(tx_buf, mapping),
2751 skb_headlen(skb),
2752 DMA_TO_DEVICE);
2753
2754 last = tx_buf->nr_frags;
2755 j += 2;
2756 for (k = 0; k < last; k++, j++) {
2757 int ring_idx = j & bp->tx_ring_mask;
2758 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2759
2760 tx_buf = &txr->tx_buf_ring[ring_idx];
2761 dma_unmap_page(
2762 &pdev->dev,
2763 dma_unmap_addr(tx_buf, mapping),
2764 skb_frag_size(frag), DMA_TO_DEVICE);
2765 }
2766 dev_kfree_skb(skb);
2767 }
2768 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2769 }
2770 }
2771
2772 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2773 {
2774 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2775 struct pci_dev *pdev = bp->pdev;
2776 struct bnxt_tpa_idx_map *map;
2777 int i, max_idx, max_agg_idx;
2778
2779 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2780 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2781 if (!rxr->rx_tpa)
2782 goto skip_rx_tpa_free;
2783
2784 for (i = 0; i < bp->max_tpa; i++) {
2785 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2786 u8 *data = tpa_info->data;
2787
2788 if (!data)
2789 continue;
2790
2791 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2792 bp->rx_buf_use_size, bp->rx_dir,
2793 DMA_ATTR_WEAK_ORDERING);
2794
2795 tpa_info->data = NULL;
2796
2797 kfree(data);
2798 }
2799
2800 skip_rx_tpa_free:
2801 for (i = 0; i < max_idx; i++) {
2802 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2803 dma_addr_t mapping = rx_buf->mapping;
2804 void *data = rx_buf->data;
2805
2806 if (!data)
2807 continue;
2808
2809 rx_buf->data = NULL;
2810 if (BNXT_RX_PAGE_MODE(bp)) {
2811 mapping -= bp->rx_dma_offset;
2812 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2813 bp->rx_dir,
2814 DMA_ATTR_WEAK_ORDERING);
2815 page_pool_recycle_direct(rxr->page_pool, data);
2816 } else {
2817 dma_unmap_single_attrs(&pdev->dev, mapping,
2818 bp->rx_buf_use_size, bp->rx_dir,
2819 DMA_ATTR_WEAK_ORDERING);
2820 kfree(data);
2821 }
2822 }
2823 for (i = 0; i < max_agg_idx; i++) {
2824 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2825 struct page *page = rx_agg_buf->page;
2826
2827 if (!page)
2828 continue;
2829
2830 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2831 BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
2832 DMA_ATTR_WEAK_ORDERING);
2833
2834 rx_agg_buf->page = NULL;
2835 __clear_bit(i, rxr->rx_agg_bmap);
2836
2837 __free_page(page);
2838 }
2839 if (rxr->rx_page) {
2840 __free_page(rxr->rx_page);
2841 rxr->rx_page = NULL;
2842 }
2843 map = rxr->rx_tpa_idx_map;
2844 if (map)
2845 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2846 }
2847
2848 static void bnxt_free_rx_skbs(struct bnxt *bp)
2849 {
2850 int i;
2851
2852 if (!bp->rx_ring)
2853 return;
2854
2855 for (i = 0; i < bp->rx_nr_rings; i++)
2856 bnxt_free_one_rx_ring_skbs(bp, i);
2857 }
2858
2859 static void bnxt_free_skbs(struct bnxt *bp)
2860 {
2861 bnxt_free_tx_skbs(bp);
2862 bnxt_free_rx_skbs(bp);
2863 }
2864
2865 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2866 {
2867 u8 init_val = mem_init->init_val;
2868 u16 offset = mem_init->offset;
2869 u8 *p2 = p;
2870 int i;
2871
2872 if (!init_val)
2873 return;
2874 if (offset == BNXT_MEM_INVALID_OFFSET) {
2875 memset(p, init_val, len);
2876 return;
2877 }
2878 for (i = 0; i < len; i += mem_init->size)
2879 *(p2 + i + offset) = init_val;
2880 }
2881
2882 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2883 {
2884 struct pci_dev *pdev = bp->pdev;
2885 int i;
2886
2887 for (i = 0; i < rmem->nr_pages; i++) {
2888 if (!rmem->pg_arr[i])
2889 continue;
2890
2891 dma_free_coherent(&pdev->dev, rmem->page_size,
2892 rmem->pg_arr[i], rmem->dma_arr[i]);
2893
2894 rmem->pg_arr[i] = NULL;
2895 }
2896 if (rmem->pg_tbl) {
2897 size_t pg_tbl_size = rmem->nr_pages * 8;
2898
2899 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2900 pg_tbl_size = rmem->page_size;
2901 dma_free_coherent(&pdev->dev, pg_tbl_size,
2902 rmem->pg_tbl, rmem->pg_tbl_map);
2903 rmem->pg_tbl = NULL;
2904 }
2905 if (rmem->vmem_size && *rmem->vmem) {
2906 vfree(*rmem->vmem);
2907 *rmem->vmem = NULL;
2908 }
2909 }
2910
2911 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2912 {
2913 struct pci_dev *pdev = bp->pdev;
2914 u64 valid_bit = 0;
2915 int i;
2916
2917 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2918 valid_bit = PTU_PTE_VALID;
2919 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2920 size_t pg_tbl_size = rmem->nr_pages * 8;
2921
2922 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2923 pg_tbl_size = rmem->page_size;
2924 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2925 &rmem->pg_tbl_map,
2926 GFP_KERNEL);
2927 if (!rmem->pg_tbl)
2928 return -ENOMEM;
2929 }
2930
2931 for (i = 0; i < rmem->nr_pages; i++) {
2932 u64 extra_bits = valid_bit;
2933
2934 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2935 rmem->page_size,
2936 &rmem->dma_arr[i],
2937 GFP_KERNEL);
2938 if (!rmem->pg_arr[i])
2939 return -ENOMEM;
2940
2941 if (rmem->mem_init)
2942 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2943 rmem->page_size);
2944 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2945 if (i == rmem->nr_pages - 2 &&
2946 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2947 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2948 else if (i == rmem->nr_pages - 1 &&
2949 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2950 extra_bits |= PTU_PTE_LAST;
2951 rmem->pg_tbl[i] =
2952 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2953 }
2954 }
2955
2956 if (rmem->vmem_size) {
2957 *rmem->vmem = vzalloc(rmem->vmem_size);
2958 if (!(*rmem->vmem))
2959 return -ENOMEM;
2960 }
2961 return 0;
2962 }
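/* Rings spanning more than one page (or with depth > 0) also get a page
 * table; each PTE carries the DMA address of a ring page plus
 * PTU_PTE_VALID and, for rings (BNXT_RMEM_RING_PTE_FLAG), the
 * NEXT_TO_LAST/LAST markers on the final entries.  Single-page rings are
 * addressed directly.
 */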
2963
2964 static void bnxt_free_tpa_info(struct bnxt *bp)
2965 {
2966 int i;
2967
2968 for (i = 0; i < bp->rx_nr_rings; i++) {
2969 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2970
2971 kfree(rxr->rx_tpa_idx_map);
2972 rxr->rx_tpa_idx_map = NULL;
2973 if (rxr->rx_tpa) {
2974 kfree(rxr->rx_tpa[0].agg_arr);
2975 rxr->rx_tpa[0].agg_arr = NULL;
2976 }
2977 kfree(rxr->rx_tpa);
2978 rxr->rx_tpa = NULL;
2979 }
2980 }
2981
2982 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2983 {
2984 int i, j, total_aggs = 0;
2985
2986 bp->max_tpa = MAX_TPA;
2987 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2988 if (!bp->max_tpa_v2)
2989 return 0;
2990 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2991 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2992 }
2993
2994 for (i = 0; i < bp->rx_nr_rings; i++) {
2995 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2996 struct rx_agg_cmp *agg;
2997
2998 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2999 GFP_KERNEL);
3000 if (!rxr->rx_tpa)
3001 return -ENOMEM;
3002
3003 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3004 continue;
3005 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3006 rxr->rx_tpa[0].agg_arr = agg;
3007 if (!agg)
3008 return -ENOMEM;
3009 for (j = 1; j < bp->max_tpa; j++)
3010 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
3011 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3012 GFP_KERNEL);
3013 if (!rxr->rx_tpa_idx_map)
3014 return -ENOMEM;
3015 }
3016 return 0;
3017 }
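/* On P5 chips the per-TPA aggregation arrays are carved out of a single
 * kcalloc() of bp->max_tpa * MAX_SKB_FRAGS entries anchored at
 * rx_tpa[0].agg_arr, which is why bnxt_free_tpa_info() frees only that
 * pointer.
 */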
3018
3019 static void bnxt_free_rx_rings(struct bnxt *bp)
3020 {
3021 int i;
3022
3023 if (!bp->rx_ring)
3024 return;
3025
3026 bnxt_free_tpa_info(bp);
3027 for (i = 0; i < bp->rx_nr_rings; i++) {
3028 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3029 struct bnxt_ring_struct *ring;
3030
3031 if (rxr->xdp_prog)
3032 bpf_prog_put(rxr->xdp_prog);
3033
3034 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3035 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3036
3037 page_pool_destroy(rxr->page_pool);
3038 rxr->page_pool = NULL;
3039
3040 kfree(rxr->rx_agg_bmap);
3041 rxr->rx_agg_bmap = NULL;
3042
3043 ring = &rxr->rx_ring_struct;
3044 bnxt_free_ring(bp, &ring->ring_mem);
3045
3046 ring = &rxr->rx_agg_ring_struct;
3047 bnxt_free_ring(bp, &ring->ring_mem);
3048 }
3049 }
3050
3051 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3052 struct bnxt_rx_ring_info *rxr)
3053 {
3054 struct page_pool_params pp = { 0 };
3055
3056 pp.pool_size = bp->rx_ring_size;
3057 pp.nid = dev_to_node(&bp->pdev->dev);
3058 pp.dev = &bp->pdev->dev;
3059 pp.dma_dir = DMA_BIDIRECTIONAL;
3060
3061 rxr->page_pool = page_pool_create(&pp);
3062 if (IS_ERR(rxr->page_pool)) {
3063 int err = PTR_ERR(rxr->page_pool);
3064
3065 rxr->page_pool = NULL;
3066 return err;
3067 }
3068 return 0;
3069 }
3070
3071 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3072 {
3073 int i, rc = 0, agg_rings = 0;
3074
3075 if (!bp->rx_ring)
3076 return -ENOMEM;
3077
3078 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3079 agg_rings = 1;
3080
3081 for (i = 0; i < bp->rx_nr_rings; i++) {
3082 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3083 struct bnxt_ring_struct *ring;
3084
3085 ring = &rxr->rx_ring_struct;
3086
3087 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3088 if (rc)
3089 return rc;
3090
3091 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3092 if (rc < 0)
3093 return rc;
3094
3095 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3096 MEM_TYPE_PAGE_POOL,
3097 rxr->page_pool);
3098 if (rc) {
3099 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3100 return rc;
3101 }
3102
3103 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3104 if (rc)
3105 return rc;
3106
3107 ring->grp_idx = i;
3108 if (agg_rings) {
3109 u16 mem_size;
3110
3111 ring = &rxr->rx_agg_ring_struct;
3112 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3113 if (rc)
3114 return rc;
3115
3116 ring->grp_idx = i;
3117 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3118 mem_size = rxr->rx_agg_bmap_size / 8;
3119 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3120 if (!rxr->rx_agg_bmap)
3121 return -ENOMEM;
3122 }
3123 }
3124 if (bp->flags & BNXT_FLAG_TPA)
3125 rc = bnxt_alloc_tpa_info(bp);
3126 return rc;
3127 }
3128
3129 static void bnxt_free_tx_rings(struct bnxt *bp)
3130 {
3131 int i;
3132 struct pci_dev *pdev = bp->pdev;
3133
3134 if (!bp->tx_ring)
3135 return;
3136
3137 for (i = 0; i < bp->tx_nr_rings; i++) {
3138 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3139 struct bnxt_ring_struct *ring;
3140
3141 if (txr->tx_push) {
3142 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3143 txr->tx_push, txr->tx_push_mapping);
3144 txr->tx_push = NULL;
3145 }
3146
3147 ring = &txr->tx_ring_struct;
3148
3149 bnxt_free_ring(bp, &ring->ring_mem);
3150 }
3151 }
3152
3153 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3154 {
3155 int i, j, rc;
3156 struct pci_dev *pdev = bp->pdev;
3157
3158 bp->tx_push_size = 0;
3159 if (bp->tx_push_thresh) {
3160 int push_size;
3161
3162 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3163 bp->tx_push_thresh);
3164
3165 if (push_size > 256) {
3166 push_size = 0;
3167 bp->tx_push_thresh = 0;
3168 }
3169
3170 bp->tx_push_size = push_size;
3171 }
3172
3173 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3174 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3175 struct bnxt_ring_struct *ring;
3176 u8 qidx;
3177
3178 ring = &txr->tx_ring_struct;
3179
3180 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3181 if (rc)
3182 return rc;
3183
3184 ring->grp_idx = txr->bnapi->index;
3185 if (bp->tx_push_size) {
3186 dma_addr_t mapping;
3187
3188 /* One pre-allocated DMA buffer to back up
3189 * the TX push operation
3190 */
3191 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3192 bp->tx_push_size,
3193 &txr->tx_push_mapping,
3194 GFP_KERNEL);
3195
3196 if (!txr->tx_push)
3197 return -ENOMEM;
3198
3199 mapping = txr->tx_push_mapping +
3200 sizeof(struct tx_push_bd);
3201 txr->data_mapping = cpu_to_le64(mapping);
3202 }
3203 qidx = bp->tc_to_qidx[j];
3204 ring->queue_id = bp->q_info[qidx].queue_id;
3205 if (i < bp->tx_nr_rings_xdp)
3206 continue;
3207 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3208 j++;
3209 }
3210 return 0;
3211 }
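/* TX push is only enabled when the push BD header plus the push
 * threshold fits in 256 bytes after cache line alignment; otherwise
 * tx_push_thresh is cleared and no push buffer is allocated.  Each TX
 * ring then gets one coherent buffer of tx_push_size bytes, with
 * data_mapping pointing just past the tx_push_bd header.
 */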
3212
3213 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3214 {
3215 kfree(cpr->cp_desc_ring);
3216 cpr->cp_desc_ring = NULL;
3217 kfree(cpr->cp_desc_mapping);
3218 cpr->cp_desc_mapping = NULL;
3219 }
3220
3221 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3222 {
3223 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3224 if (!cpr->cp_desc_ring)
3225 return -ENOMEM;
3226 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3227 GFP_KERNEL);
3228 if (!cpr->cp_desc_mapping)
3229 return -ENOMEM;
3230 return 0;
3231 }
3232
3233 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3234 {
3235 int i;
3236
3237 if (!bp->bnapi)
3238 return;
3239 for (i = 0; i < bp->cp_nr_rings; i++) {
3240 struct bnxt_napi *bnapi = bp->bnapi[i];
3241
3242 if (!bnapi)
3243 continue;
3244 bnxt_free_cp_arrays(&bnapi->cp_ring);
3245 }
3246 }
3247
3248 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3249 {
3250 int i, n = bp->cp_nr_pages;
3251
3252 for (i = 0; i < bp->cp_nr_rings; i++) {
3253 struct bnxt_napi *bnapi = bp->bnapi[i];
3254 int rc;
3255
3256 if (!bnapi)
3257 continue;
3258 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3259 if (rc)
3260 return rc;
3261 }
3262 return 0;
3263 }
3264
3265 static void bnxt_free_cp_rings(struct bnxt *bp)
3266 {
3267 int i;
3268
3269 if (!bp->bnapi)
3270 return;
3271
3272 for (i = 0; i < bp->cp_nr_rings; i++) {
3273 struct bnxt_napi *bnapi = bp->bnapi[i];
3274 struct bnxt_cp_ring_info *cpr;
3275 struct bnxt_ring_struct *ring;
3276 int j;
3277
3278 if (!bnapi)
3279 continue;
3280
3281 cpr = &bnapi->cp_ring;
3282 ring = &cpr->cp_ring_struct;
3283
3284 bnxt_free_ring(bp, &ring->ring_mem);
3285
3286 for (j = 0; j < 2; j++) {
3287 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3288
3289 if (cpr2) {
3290 ring = &cpr2->cp_ring_struct;
3291 bnxt_free_ring(bp, &ring->ring_mem);
3292 bnxt_free_cp_arrays(cpr2);
3293 kfree(cpr2);
3294 cpr->cp_ring_arr[j] = NULL;
3295 }
3296 }
3297 }
3298 }
3299
3300 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3301 {
3302 struct bnxt_ring_mem_info *rmem;
3303 struct bnxt_ring_struct *ring;
3304 struct bnxt_cp_ring_info *cpr;
3305 int rc;
3306
3307 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3308 if (!cpr)
3309 return NULL;
3310
3311 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3312 if (rc) {
3313 bnxt_free_cp_arrays(cpr);
3314 kfree(cpr);
3315 return NULL;
3316 }
3317 ring = &cpr->cp_ring_struct;
3318 rmem = &ring->ring_mem;
3319 rmem->nr_pages = bp->cp_nr_pages;
3320 rmem->page_size = HW_CMPD_RING_SIZE;
3321 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3322 rmem->dma_arr = cpr->cp_desc_mapping;
3323 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3324 rc = bnxt_alloc_ring(bp, rmem);
3325 if (rc) {
3326 bnxt_free_ring(bp, rmem);
3327 bnxt_free_cp_arrays(cpr);
3328 kfree(cpr);
3329 cpr = NULL;
3330 }
3331 return cpr;
3332 }
3333
3334 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3335 {
3336 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3337 int i, rc, ulp_base_vec, ulp_msix;
3338
3339 ulp_msix = bnxt_get_ulp_msix_num(bp);
3340 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3341 for (i = 0; i < bp->cp_nr_rings; i++) {
3342 struct bnxt_napi *bnapi = bp->bnapi[i];
3343 struct bnxt_cp_ring_info *cpr;
3344 struct bnxt_ring_struct *ring;
3345
3346 if (!bnapi)
3347 continue;
3348
3349 cpr = &bnapi->cp_ring;
3350 cpr->bnapi = bnapi;
3351 ring = &cpr->cp_ring_struct;
3352
3353 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3354 if (rc)
3355 return rc;
3356
3357 if (ulp_msix && i >= ulp_base_vec)
3358 ring->map_idx = i + ulp_msix;
3359 else
3360 ring->map_idx = i;
3361
3362 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3363 continue;
3364
3365 if (i < bp->rx_nr_rings) {
3366 struct bnxt_cp_ring_info *cpr2 =
3367 bnxt_alloc_cp_sub_ring(bp);
3368
3369 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3370 if (!cpr2)
3371 return -ENOMEM;
3372 cpr2->bnapi = bnapi;
3373 }
3374 if ((sh && i < bp->tx_nr_rings) ||
3375 (!sh && i >= bp->rx_nr_rings)) {
3376 struct bnxt_cp_ring_info *cpr2 =
3377 bnxt_alloc_cp_sub_ring(bp);
3378
3379 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3380 if (!cpr2)
3381 return -ENOMEM;
3382 cpr2->bnapi = bnapi;
3383 }
3384 }
3385 return 0;
3386 }
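/* On P5 chips each NAPI notification ring gets up to two sub completion
 * rings: BNXT_RX_HDL for its rx ring and BNXT_TX_HDL for its tx ring,
 * with the TX placement depending on BNXT_FLAG_SHARED_RINGS.  map_idx
 * skips past any MSI-X vectors reserved for the ULP driver.
 */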
3387
3388 static void bnxt_init_ring_struct(struct bnxt *bp)
3389 {
3390 int i;
3391
3392 for (i = 0; i < bp->cp_nr_rings; i++) {
3393 struct bnxt_napi *bnapi = bp->bnapi[i];
3394 struct bnxt_ring_mem_info *rmem;
3395 struct bnxt_cp_ring_info *cpr;
3396 struct bnxt_rx_ring_info *rxr;
3397 struct bnxt_tx_ring_info *txr;
3398 struct bnxt_ring_struct *ring;
3399
3400 if (!bnapi)
3401 continue;
3402
3403 cpr = &bnapi->cp_ring;
3404 ring = &cpr->cp_ring_struct;
3405 rmem = &ring->ring_mem;
3406 rmem->nr_pages = bp->cp_nr_pages;
3407 rmem->page_size = HW_CMPD_RING_SIZE;
3408 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3409 rmem->dma_arr = cpr->cp_desc_mapping;
3410 rmem->vmem_size = 0;
3411
3412 rxr = bnapi->rx_ring;
3413 if (!rxr)
3414 goto skip_rx;
3415
3416 ring = &rxr->rx_ring_struct;
3417 rmem = &ring->ring_mem;
3418 rmem->nr_pages = bp->rx_nr_pages;
3419 rmem->page_size = HW_RXBD_RING_SIZE;
3420 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3421 rmem->dma_arr = rxr->rx_desc_mapping;
3422 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3423 rmem->vmem = (void **)&rxr->rx_buf_ring;
3424
3425 ring = &rxr->rx_agg_ring_struct;
3426 rmem = &ring->ring_mem;
3427 rmem->nr_pages = bp->rx_agg_nr_pages;
3428 rmem->page_size = HW_RXBD_RING_SIZE;
3429 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3430 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3431 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3432 rmem->vmem = (void **)&rxr->rx_agg_ring;
3433
3434 skip_rx:
3435 txr = bnapi->tx_ring;
3436 if (!txr)
3437 continue;
3438
3439 ring = &txr->tx_ring_struct;
3440 rmem = &ring->ring_mem;
3441 rmem->nr_pages = bp->tx_nr_pages;
3442 rmem->page_size = HW_RXBD_RING_SIZE;
3443 rmem->pg_arr = (void **)txr->tx_desc_ring;
3444 rmem->dma_arr = txr->tx_desc_mapping;
3445 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3446 rmem->vmem = (void **)&txr->tx_buf_ring;
3447 }
3448 }
3449
3450 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3451 {
3452 int i;
3453 u32 prod;
3454 struct rx_bd **rx_buf_ring;
3455
3456 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3457 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3458 int j;
3459 struct rx_bd *rxbd;
3460
3461 rxbd = rx_buf_ring[i];
3462 if (!rxbd)
3463 continue;
3464
3465 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3466 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3467 rxbd->rx_bd_opaque = prod;
3468 }
3469 }
3470 }
3471
3472 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3473 {
3474 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3475 struct net_device *dev = bp->dev;
3476 u32 prod;
3477 int i;
3478
3479 prod = rxr->rx_prod;
3480 for (i = 0; i < bp->rx_ring_size; i++) {
3481 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3482 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3483 ring_nr, i, bp->rx_ring_size);
3484 break;
3485 }
3486 prod = NEXT_RX(prod);
3487 }
3488 rxr->rx_prod = prod;
3489
3490 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3491 return 0;
3492
3493 prod = rxr->rx_agg_prod;
3494 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3495 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3496 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3497 ring_nr, i, bp->rx_agg_ring_size);
3498 break;
3499 }
3500 prod = NEXT_RX_AGG(prod);
3501 }
3502 rxr->rx_agg_prod = prod;
3503
3504 if (rxr->rx_tpa) {
3505 dma_addr_t mapping;
3506 u8 *data;
3507
3508 for (i = 0; i < bp->max_tpa; i++) {
3509 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3510 if (!data)
3511 return -ENOMEM;
3512
3513 rxr->rx_tpa[i].data = data;
3514 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3515 rxr->rx_tpa[i].mapping = mapping;
3516 }
3517 }
3518 return 0;
3519 }
3520
3521 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3522 {
3523 struct bnxt_rx_ring_info *rxr;
3524 struct bnxt_ring_struct *ring;
3525 u32 type;
3526
3527 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3528 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3529
3530 if (NET_IP_ALIGN == 2)
3531 type |= RX_BD_FLAGS_SOP;
3532
3533 rxr = &bp->rx_ring[ring_nr];
3534 ring = &rxr->rx_ring_struct;
3535 bnxt_init_rxbd_pages(ring, type);
3536
3537 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3538 bpf_prog_add(bp->xdp_prog, 1);
3539 rxr->xdp_prog = bp->xdp_prog;
3540 }
3541 ring->fw_ring_id = INVALID_HW_RING_ID;
3542
3543 ring = &rxr->rx_agg_ring_struct;
3544 ring->fw_ring_id = INVALID_HW_RING_ID;
3545
3546 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3547 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3548 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3549
3550 bnxt_init_rxbd_pages(ring, type);
3551 }
3552
3553 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3554 }
3555
3556 static void bnxt_init_cp_rings(struct bnxt *bp)
3557 {
3558 int i, j;
3559
3560 for (i = 0; i < bp->cp_nr_rings; i++) {
3561 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3562 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3563
3564 ring->fw_ring_id = INVALID_HW_RING_ID;
3565 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3566 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3567 for (j = 0; j < 2; j++) {
3568 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3569
3570 if (!cpr2)
3571 continue;
3572
3573 ring = &cpr2->cp_ring_struct;
3574 ring->fw_ring_id = INVALID_HW_RING_ID;
3575 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3576 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3577 }
3578 }
3579 }
3580
3581 static int bnxt_init_rx_rings(struct bnxt *bp)
3582 {
3583 int i, rc = 0;
3584
3585 if (BNXT_RX_PAGE_MODE(bp)) {
3586 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3587 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3588 } else {
3589 bp->rx_offset = BNXT_RX_OFFSET;
3590 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3591 }
3592
3593 for (i = 0; i < bp->rx_nr_rings; i++) {
3594 rc = bnxt_init_one_rx_ring(bp, i);
3595 if (rc)
3596 break;
3597 }
3598
3599 return rc;
3600 }
3601
3602 static int bnxt_init_tx_rings(struct bnxt *bp)
3603 {
3604 u16 i;
3605
3606 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3607 MAX_SKB_FRAGS + 1);
3608
3609 for (i = 0; i < bp->tx_nr_rings; i++) {
3610 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3611 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3612
3613 ring->fw_ring_id = INVALID_HW_RING_ID;
3614 }
3615
3616 return 0;
3617 }
3618
3619 static void bnxt_free_ring_grps(struct bnxt *bp)
3620 {
3621 kfree(bp->grp_info);
3622 bp->grp_info = NULL;
3623 }
3624
3625 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3626 {
3627 int i;
3628
3629 if (irq_re_init) {
3630 bp->grp_info = kcalloc(bp->cp_nr_rings,
3631 sizeof(struct bnxt_ring_grp_info),
3632 GFP_KERNEL);
3633 if (!bp->grp_info)
3634 return -ENOMEM;
3635 }
3636 for (i = 0; i < bp->cp_nr_rings; i++) {
3637 if (irq_re_init)
3638 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3639 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3640 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3641 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3642 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3643 }
3644 return 0;
3645 }
3646
3647 static void bnxt_free_vnics(struct bnxt *bp)
3648 {
3649 kfree(bp->vnic_info);
3650 bp->vnic_info = NULL;
3651 bp->nr_vnics = 0;
3652 }
3653
3654 static int bnxt_alloc_vnics(struct bnxt *bp)
3655 {
3656 int num_vnics = 1;
3657
3658 #ifdef CONFIG_RFS_ACCEL
3659 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3660 num_vnics += bp->rx_nr_rings;
3661 #endif
3662
3663 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3664 num_vnics++;
3665
3666 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3667 GFP_KERNEL);
3668 if (!bp->vnic_info)
3669 return -ENOMEM;
3670
3671 bp->nr_vnics = num_vnics;
3672 return 0;
3673 }
3674
3675 static void bnxt_init_vnics(struct bnxt *bp)
3676 {
3677 int i;
3678
3679 for (i = 0; i < bp->nr_vnics; i++) {
3680 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3681 int j;
3682
3683 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3684 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3685 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3686
3687 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3688
3689 if (bp->vnic_info[i].rss_hash_key) {
3690 if (i == 0)
3691 prandom_bytes(vnic->rss_hash_key,
3692 HW_HASH_KEY_SIZE);
3693 else
3694 memcpy(vnic->rss_hash_key,
3695 bp->vnic_info[0].rss_hash_key,
3696 HW_HASH_KEY_SIZE);
3697 }
3698 }
3699 }
3700
3701 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3702 {
3703 int pages;
3704
3705 pages = ring_size / desc_per_pg;
3706
3707 if (!pages)
3708 return 1;
3709
3710 pages++;
3711
3712 while (pages & (pages - 1))
3713 pages++;
3714
3715 return pages;
3716 }
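/* Illustrative examples: ring_size = 200 with 64 descriptors per page
 * gives pages = 3, incremented to 4, which is already a power of two;
 * ring_size = 256 gives 4, incremented to 5, and the loop walks up to 8.
 * The result is always a power-of-two page count whose capacity is
 * strictly greater than ring_size.
 */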
3717
3718 void bnxt_set_tpa_flags(struct bnxt *bp)
3719 {
3720 bp->flags &= ~BNXT_FLAG_TPA;
3721 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3722 return;
3723 if (bp->dev->features & NETIF_F_LRO)
3724 bp->flags |= BNXT_FLAG_LRO;
3725 else if (bp->dev->features & NETIF_F_GRO_HW)
3726 bp->flags |= BNXT_FLAG_GRO;
3727 }
3728
3729 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3730 * be set on entry.
3731 */
3732 void bnxt_set_ring_params(struct bnxt *bp)
3733 {
3734 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3735 u32 agg_factor = 0, agg_ring_size = 0;
3736
3737 /* 8 for CRC and VLAN */
3738 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3739
3740 rx_space = rx_size + NET_SKB_PAD +
3741 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3742
3743 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3744 ring_size = bp->rx_ring_size;
3745 bp->rx_agg_ring_size = 0;
3746 bp->rx_agg_nr_pages = 0;
3747
3748 if (bp->flags & BNXT_FLAG_TPA)
3749 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3750
3751 bp->flags &= ~BNXT_FLAG_JUMBO;
3752 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3753 u32 jumbo_factor;
3754
3755 bp->flags |= BNXT_FLAG_JUMBO;
3756 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3757 if (jumbo_factor > agg_factor)
3758 agg_factor = jumbo_factor;
3759 }
3760 if (agg_factor) {
3761 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3762 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3763 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3764 bp->rx_ring_size, ring_size);
3765 bp->rx_ring_size = ring_size;
3766 }
3767 agg_ring_size = ring_size * agg_factor;
3768
3769 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3770 RX_DESC_CNT);
3771 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3772 u32 tmp = agg_ring_size;
3773
3774 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3775 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3776 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3777 tmp, agg_ring_size);
3778 }
3779 bp->rx_agg_ring_size = agg_ring_size;
3780 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3781 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3782 rx_space = rx_size + NET_SKB_PAD +
3783 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3784 }
3785
3786 bp->rx_buf_use_size = rx_size;
3787 bp->rx_buf_size = rx_space;
3788
3789 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3790 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3791
3792 ring_size = bp->tx_ring_size;
3793 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3794 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3795
3796 max_rx_cmpl = bp->rx_ring_size;
3797 /* MAX TPA needs to be added because TPA_START completions are
3798 * immediately recycled, so the TPA completions are not bound by
3799 * the RX ring size.
3800 */
3801 if (bp->flags & BNXT_FLAG_TPA)
3802 max_rx_cmpl += bp->max_tpa;
3803 /* RX and TPA completions are 32-byte, all others are 16-byte */
3804 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3805 bp->cp_ring_size = ring_size;
3806
3807 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3808 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3809 bp->cp_nr_pages = MAX_CP_PAGES;
3810 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3811 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3812 ring_size, bp->cp_ring_size);
3813 }
3814 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3815 bp->cp_ring_mask = bp->cp_bit - 1;
3816 }
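
/* Editorial note (illustrative only, not part of the upstream driver): the
 * completion ring is sized for the worst case described above, with RX and
 * TPA completions counted twice because they are 32 bytes wide.  With, say,
 * rx_ring_size = tx_ring_size = 511, TPA disabled and no aggregation ring,
 * cp_ring_size = 511 * 2 + 0 + 511 = 1533 entries, which
 * bnxt_calc_nr_ring_pages() then rounds up to whole pages of CP_DESC_CNT
 * descriptors.
 */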
3817
3818 /* Changing allocation mode of RX rings.
3819 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3820 */
3821 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3822 {
3823 if (page_mode) {
3824 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3825 return -EOPNOTSUPP;
3826 bp->dev->max_mtu =
3827 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3828 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3829 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3830 bp->rx_dir = DMA_BIDIRECTIONAL;
3831 bp->rx_skb_func = bnxt_rx_page_skb;
3832 /* Disable LRO or GRO_HW */
3833 netdev_update_features(bp->dev);
3834 } else {
3835 bp->dev->max_mtu = bp->max_mtu;
3836 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3837 bp->rx_dir = DMA_FROM_DEVICE;
3838 bp->rx_skb_func = bnxt_rx_skb;
3839 }
3840 return 0;
3841 }
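
/* Editorial note (not part of the upstream driver): page mode, used when an
 * XDP program is attached, caps the MTU at BNXT_MAX_PAGE_MODE_MTU, disables
 * the aggregation rings (and with them LRO/GRO_HW via
 * netdev_update_features()), maps RX buffers DMA_BIDIRECTIONAL so that XDP
 * programs may modify them, and switches the SKB construction routine to
 * bnxt_rx_page_skb().
 */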
3842
3843 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3844 {
3845 int i;
3846 struct bnxt_vnic_info *vnic;
3847 struct pci_dev *pdev = bp->pdev;
3848
3849 if (!bp->vnic_info)
3850 return;
3851
3852 for (i = 0; i < bp->nr_vnics; i++) {
3853 vnic = &bp->vnic_info[i];
3854
3855 kfree(vnic->fw_grp_ids);
3856 vnic->fw_grp_ids = NULL;
3857
3858 kfree(vnic->uc_list);
3859 vnic->uc_list = NULL;
3860
3861 if (vnic->mc_list) {
3862 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3863 vnic->mc_list, vnic->mc_list_mapping);
3864 vnic->mc_list = NULL;
3865 }
3866
3867 if (vnic->rss_table) {
3868 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3869 vnic->rss_table,
3870 vnic->rss_table_dma_addr);
3871 vnic->rss_table = NULL;
3872 }
3873
3874 vnic->rss_hash_key = NULL;
3875 vnic->flags = 0;
3876 }
3877 }
3878
3879 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3880 {
3881 int i, rc = 0, size;
3882 struct bnxt_vnic_info *vnic;
3883 struct pci_dev *pdev = bp->pdev;
3884 int max_rings;
3885
3886 for (i = 0; i < bp->nr_vnics; i++) {
3887 vnic = &bp->vnic_info[i];
3888
3889 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3890 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3891
3892 if (mem_size > 0) {
3893 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3894 if (!vnic->uc_list) {
3895 rc = -ENOMEM;
3896 goto out;
3897 }
3898 }
3899 }
3900
3901 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3902 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3903 vnic->mc_list =
3904 dma_alloc_coherent(&pdev->dev,
3905 vnic->mc_list_size,
3906 &vnic->mc_list_mapping,
3907 GFP_KERNEL);
3908 if (!vnic->mc_list) {
3909 rc = -ENOMEM;
3910 goto out;
3911 }
3912 }
3913
3914 if (bp->flags & BNXT_FLAG_CHIP_P5)
3915 goto vnic_skip_grps;
3916
3917 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3918 max_rings = bp->rx_nr_rings;
3919 else
3920 max_rings = 1;
3921
3922 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3923 if (!vnic->fw_grp_ids) {
3924 rc = -ENOMEM;
3925 goto out;
3926 }
3927 vnic_skip_grps:
3928 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3929 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3930 continue;
3931
3932 /* Allocate rss table and hash key */
3933 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3934 if (bp->flags & BNXT_FLAG_CHIP_P5)
3935 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3936
3937 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3938 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3939 vnic->rss_table_size,
3940 &vnic->rss_table_dma_addr,
3941 GFP_KERNEL);
3942 if (!vnic->rss_table) {
3943 rc = -ENOMEM;
3944 goto out;
3945 }
3946
3947 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3948 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3949 }
3950 return 0;
3951
3952 out:
3953 return rc;
3954 }
3955
3956 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3957 {
3958 struct pci_dev *pdev = bp->pdev;
3959
3960 if (bp->hwrm_cmd_resp_addr) {
3961 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3962 bp->hwrm_cmd_resp_dma_addr);
3963 bp->hwrm_cmd_resp_addr = NULL;
3964 }
3965 }
3966
3967 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3968 {
3969 struct pci_dev *pdev = bp->pdev;
3970
3971 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3972 &bp->hwrm_cmd_resp_dma_addr,
3973 GFP_KERNEL);
3974 if (!bp->hwrm_cmd_resp_addr)
3975 return -ENOMEM;
3976
3977 return 0;
3978 }
3979
3980 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3981 {
3982 if (bp->hwrm_short_cmd_req_addr) {
3983 struct pci_dev *pdev = bp->pdev;
3984
3985 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3986 bp->hwrm_short_cmd_req_addr,
3987 bp->hwrm_short_cmd_req_dma_addr);
3988 bp->hwrm_short_cmd_req_addr = NULL;
3989 }
3990 }
3991
3992 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3993 {
3994 struct pci_dev *pdev = bp->pdev;
3995
3996 if (bp->hwrm_short_cmd_req_addr)
3997 return 0;
3998
3999 bp->hwrm_short_cmd_req_addr =
4000 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
4001 &bp->hwrm_short_cmd_req_dma_addr,
4002 GFP_KERNEL);
4003 if (!bp->hwrm_short_cmd_req_addr)
4004 return -ENOMEM;
4005
4006 return 0;
4007 }
4008
4009 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4010 {
4011 kfree(stats->hw_masks);
4012 stats->hw_masks = NULL;
4013 kfree(stats->sw_stats);
4014 stats->sw_stats = NULL;
4015 if (stats->hw_stats) {
4016 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4017 stats->hw_stats_map);
4018 stats->hw_stats = NULL;
4019 }
4020 }
4021
4022 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4023 bool alloc_masks)
4024 {
4025 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4026 &stats->hw_stats_map, GFP_KERNEL);
4027 if (!stats->hw_stats)
4028 return -ENOMEM;
4029
4030 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4031 if (!stats->sw_stats)
4032 goto stats_mem_err;
4033
4034 if (alloc_masks) {
4035 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4036 if (!stats->hw_masks)
4037 goto stats_mem_err;
4038 }
4039 return 0;
4040
4041 stats_mem_err:
4042 bnxt_free_stats_mem(bp, stats);
4043 return -ENOMEM;
4044 }
4045
4046 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4047 {
4048 int i;
4049
4050 for (i = 0; i < count; i++)
4051 mask_arr[i] = mask;
4052 }
4053
4054 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4055 {
4056 int i;
4057
4058 for (i = 0; i < count; i++)
4059 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4060 }
4061
4062 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4063 struct bnxt_stats_mem *stats)
4064 {
4065 struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
4066 struct hwrm_func_qstats_ext_input req = {0};
4067 __le64 *hw_masks;
4068 int rc;
4069
4070 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4071 !(bp->flags & BNXT_FLAG_CHIP_P5))
4072 return -EOPNOTSUPP;
4073
4074 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
4075 req.fid = cpu_to_le16(0xffff);
4076 req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4077 mutex_lock(&bp->hwrm_cmd_lock);
4078 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4079 if (rc)
4080 goto qstat_exit;
4081
4082 hw_masks = &resp->rx_ucast_pkts;
4083 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4084
4085 qstat_exit:
4086 mutex_unlock(&bp->hwrm_cmd_lock);
4087 return rc;
4088 }
4089
4090 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4091 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4092
4093 static void bnxt_init_stats(struct bnxt *bp)
4094 {
4095 struct bnxt_napi *bnapi = bp->bnapi[0];
4096 struct bnxt_cp_ring_info *cpr;
4097 struct bnxt_stats_mem *stats;
4098 __le64 *rx_stats, *tx_stats;
4099 int rc, rx_count, tx_count;
4100 u64 *rx_masks, *tx_masks;
4101 u64 mask;
4102 u8 flags;
4103
4104 cpr = &bnapi->cp_ring;
4105 stats = &cpr->stats;
4106 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4107 if (rc) {
4108 if (bp->flags & BNXT_FLAG_CHIP_P5)
4109 mask = (1ULL << 48) - 1;
4110 else
4111 mask = -1ULL;
4112 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4113 }
4114 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4115 stats = &bp->port_stats;
4116 rx_stats = stats->hw_stats;
4117 rx_masks = stats->hw_masks;
4118 rx_count = sizeof(struct rx_port_stats) / 8;
4119 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4120 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4121 tx_count = sizeof(struct tx_port_stats) / 8;
4122
4123 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4124 rc = bnxt_hwrm_port_qstats(bp, flags);
4125 if (rc) {
4126 mask = (1ULL << 40) - 1;
4127
4128 bnxt_fill_masks(rx_masks, mask, rx_count);
4129 bnxt_fill_masks(tx_masks, mask, tx_count);
4130 } else {
4131 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4132 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4133 bnxt_hwrm_port_qstats(bp, 0);
4134 }
4135 }
4136 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4137 stats = &bp->rx_port_stats_ext;
4138 rx_stats = stats->hw_stats;
4139 rx_masks = stats->hw_masks;
4140 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4141 stats = &bp->tx_port_stats_ext;
4142 tx_stats = stats->hw_stats;
4143 tx_masks = stats->hw_masks;
4144 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4145
4146 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4147 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4148 if (rc) {
4149 mask = (1ULL << 40) - 1;
4150
4151 bnxt_fill_masks(rx_masks, mask, rx_count);
4152 if (tx_stats)
4153 bnxt_fill_masks(tx_masks, mask, tx_count);
4154 } else {
4155 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4156 if (tx_stats)
4157 bnxt_copy_hw_masks(tx_masks, tx_stats,
4158 tx_count);
4159 bnxt_hwrm_port_qstats_ext(bp, 0);
4160 }
4161 }
4162 }
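
/* Editorial note (not part of the upstream driver): bnxt_init_stats() tries
 * to learn the hardware counter widths by issuing the qstats calls with the
 * COUNTER_MASK flag set.  When that is not supported, it falls back to the
 * assumed widths seen above: 48-bit ring counters on P5 chips, full 64-bit
 * counters otherwise, and 40-bit port counters.
 */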
4163
4164 static void bnxt_free_port_stats(struct bnxt *bp)
4165 {
4166 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4167 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4168
4169 bnxt_free_stats_mem(bp, &bp->port_stats);
4170 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4171 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4172 }
4173
4174 static void bnxt_free_ring_stats(struct bnxt *bp)
4175 {
4176 int i;
4177
4178 if (!bp->bnapi)
4179 return;
4180
4181 for (i = 0; i < bp->cp_nr_rings; i++) {
4182 struct bnxt_napi *bnapi = bp->bnapi[i];
4183 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4184
4185 bnxt_free_stats_mem(bp, &cpr->stats);
4186 }
4187 }
4188
4189 static int bnxt_alloc_stats(struct bnxt *bp)
4190 {
4191 u32 size, i;
4192 int rc;
4193
4194 size = bp->hw_ring_stats_size;
4195
4196 for (i = 0; i < bp->cp_nr_rings; i++) {
4197 struct bnxt_napi *bnapi = bp->bnapi[i];
4198 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4199
4200 cpr->stats.len = size;
4201 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4202 if (rc)
4203 return rc;
4204
4205 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4206 }
4207
4208 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4209 return 0;
4210
4211 if (bp->port_stats.hw_stats)
4212 goto alloc_ext_stats;
4213
4214 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4215 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4216 if (rc)
4217 return rc;
4218
4219 bp->flags |= BNXT_FLAG_PORT_STATS;
4220
4221 alloc_ext_stats:
4222 /* Display extended statistics only if FW supports it */
4223 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4224 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4225 return 0;
4226
4227 if (bp->rx_port_stats_ext.hw_stats)
4228 goto alloc_tx_ext_stats;
4229
4230 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4231 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4232 /* Extended stats are optional */
4233 if (rc)
4234 return 0;
4235
4236 alloc_tx_ext_stats:
4237 if (bp->tx_port_stats_ext.hw_stats)
4238 return 0;
4239
4240 if (bp->hwrm_spec_code >= 0x10902 ||
4241 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4242 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4243 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4244 /* Extended stats are optional */
4245 if (rc)
4246 return 0;
4247 }
4248 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4249 return 0;
4250 }
4251
4252 static void bnxt_clear_ring_indices(struct bnxt *bp)
4253 {
4254 int i;
4255
4256 if (!bp->bnapi)
4257 return;
4258
4259 for (i = 0; i < bp->cp_nr_rings; i++) {
4260 struct bnxt_napi *bnapi = bp->bnapi[i];
4261 struct bnxt_cp_ring_info *cpr;
4262 struct bnxt_rx_ring_info *rxr;
4263 struct bnxt_tx_ring_info *txr;
4264
4265 if (!bnapi)
4266 continue;
4267
4268 cpr = &bnapi->cp_ring;
4269 cpr->cp_raw_cons = 0;
4270
4271 txr = bnapi->tx_ring;
4272 if (txr) {
4273 txr->tx_prod = 0;
4274 txr->tx_cons = 0;
4275 }
4276
4277 rxr = bnapi->rx_ring;
4278 if (rxr) {
4279 rxr->rx_prod = 0;
4280 rxr->rx_agg_prod = 0;
4281 rxr->rx_sw_agg_prod = 0;
4282 rxr->rx_next_cons = 0;
4283 }
4284 }
4285 }
4286
4287 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4288 {
4289 #ifdef CONFIG_RFS_ACCEL
4290 int i;
4291
4292 /* We are called under rtnl_lock and all our NAPIs have been
4293 * disabled, so it is safe to delete the hash table.
4294 */
4295 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4296 struct hlist_head *head;
4297 struct hlist_node *tmp;
4298 struct bnxt_ntuple_filter *fltr;
4299
4300 head = &bp->ntp_fltr_hash_tbl[i];
4301 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4302 hlist_del(&fltr->hash);
4303 kfree(fltr);
4304 }
4305 }
4306 if (irq_reinit) {
4307 kfree(bp->ntp_fltr_bmap);
4308 bp->ntp_fltr_bmap = NULL;
4309 }
4310 bp->ntp_fltr_count = 0;
4311 #endif
4312 }
4313
4314 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4315 {
4316 #ifdef CONFIG_RFS_ACCEL
4317 int i, rc = 0;
4318
4319 if (!(bp->flags & BNXT_FLAG_RFS))
4320 return 0;
4321
4322 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4323 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4324
4325 bp->ntp_fltr_count = 0;
4326 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4327 sizeof(long),
4328 GFP_KERNEL);
4329
4330 if (!bp->ntp_fltr_bmap)
4331 rc = -ENOMEM;
4332
4333 return rc;
4334 #else
4335 return 0;
4336 #endif
4337 }
4338
4339 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4340 {
4341 bnxt_free_vnic_attributes(bp);
4342 bnxt_free_tx_rings(bp);
4343 bnxt_free_rx_rings(bp);
4344 bnxt_free_cp_rings(bp);
4345 bnxt_free_all_cp_arrays(bp);
4346 bnxt_free_ntp_fltrs(bp, irq_re_init);
4347 if (irq_re_init) {
4348 bnxt_free_ring_stats(bp);
4349 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4350 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4351 bnxt_free_port_stats(bp);
4352 bnxt_free_ring_grps(bp);
4353 bnxt_free_vnics(bp);
4354 kfree(bp->tx_ring_map);
4355 bp->tx_ring_map = NULL;
4356 kfree(bp->tx_ring);
4357 bp->tx_ring = NULL;
4358 kfree(bp->rx_ring);
4359 bp->rx_ring = NULL;
4360 kfree(bp->bnapi);
4361 bp->bnapi = NULL;
4362 } else {
4363 bnxt_clear_ring_indices(bp);
4364 }
4365 }
4366
4367 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4368 {
4369 int i, j, rc, size, arr_size;
4370 void *bnapi;
4371
4372 if (irq_re_init) {
4373 /* Allocate bnapi mem pointer array and mem block for
4374 * all queues
4375 */
4376 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4377 bp->cp_nr_rings);
4378 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4379 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4380 if (!bnapi)
4381 return -ENOMEM;
4382
4383 bp->bnapi = bnapi;
4384 bnapi += arr_size;
4385 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4386 bp->bnapi[i] = bnapi;
4387 bp->bnapi[i]->index = i;
4388 bp->bnapi[i]->bp = bp;
4389 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4390 struct bnxt_cp_ring_info *cpr =
4391 &bp->bnapi[i]->cp_ring;
4392
4393 cpr->cp_ring_struct.ring_mem.flags =
4394 BNXT_RMEM_RING_PTE_FLAG;
4395 }
4396 }
4397
4398 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4399 sizeof(struct bnxt_rx_ring_info),
4400 GFP_KERNEL);
4401 if (!bp->rx_ring)
4402 return -ENOMEM;
4403
4404 for (i = 0; i < bp->rx_nr_rings; i++) {
4405 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4406
4407 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4408 rxr->rx_ring_struct.ring_mem.flags =
4409 BNXT_RMEM_RING_PTE_FLAG;
4410 rxr->rx_agg_ring_struct.ring_mem.flags =
4411 BNXT_RMEM_RING_PTE_FLAG;
4412 }
4413 rxr->bnapi = bp->bnapi[i];
4414 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4415 }
4416
4417 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4418 sizeof(struct bnxt_tx_ring_info),
4419 GFP_KERNEL);
4420 if (!bp->tx_ring)
4421 return -ENOMEM;
4422
4423 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4424 GFP_KERNEL);
4425
4426 if (!bp->tx_ring_map)
4427 return -ENOMEM;
4428
4429 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4430 j = 0;
4431 else
4432 j = bp->rx_nr_rings;
4433
4434 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4435 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4436
4437 if (bp->flags & BNXT_FLAG_CHIP_P5)
4438 txr->tx_ring_struct.ring_mem.flags =
4439 BNXT_RMEM_RING_PTE_FLAG;
4440 txr->bnapi = bp->bnapi[j];
4441 bp->bnapi[j]->tx_ring = txr;
4442 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4443 if (i >= bp->tx_nr_rings_xdp) {
4444 txr->txq_index = i - bp->tx_nr_rings_xdp;
4445 bp->bnapi[j]->tx_int = bnxt_tx_int;
4446 } else {
4447 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4448 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4449 }
4450 }
4451
4452 rc = bnxt_alloc_stats(bp);
4453 if (rc)
4454 goto alloc_mem_err;
4455 bnxt_init_stats(bp);
4456
4457 rc = bnxt_alloc_ntp_fltrs(bp);
4458 if (rc)
4459 goto alloc_mem_err;
4460
4461 rc = bnxt_alloc_vnics(bp);
4462 if (rc)
4463 goto alloc_mem_err;
4464 }
4465
4466 rc = bnxt_alloc_all_cp_arrays(bp);
4467 if (rc)
4468 goto alloc_mem_err;
4469
4470 bnxt_init_ring_struct(bp);
4471
4472 rc = bnxt_alloc_rx_rings(bp);
4473 if (rc)
4474 goto alloc_mem_err;
4475
4476 rc = bnxt_alloc_tx_rings(bp);
4477 if (rc)
4478 goto alloc_mem_err;
4479
4480 rc = bnxt_alloc_cp_rings(bp);
4481 if (rc)
4482 goto alloc_mem_err;
4483
4484 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4485 BNXT_VNIC_UCAST_FLAG;
4486 rc = bnxt_alloc_vnic_attributes(bp);
4487 if (rc)
4488 goto alloc_mem_err;
4489 return 0;
4490
4491 alloc_mem_err:
4492 bnxt_free_mem(bp, true);
4493 return rc;
4494 }
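
/* Editorial note (not part of the upstream driver): the bnapi structures are
 * carved out of a single kzalloc(): a cache-aligned array of bnxt_napi
 * pointers followed by one cache-aligned bnxt_napi block per completion
 * ring.  The rx_ring and tx_ring arrays are then cross-linked to their
 * bnapi owners, with TX rings mapped after the RX rings unless
 * BNXT_FLAG_SHARED_RINGS is set.
 */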
4495
4496 static void bnxt_disable_int(struct bnxt *bp)
4497 {
4498 int i;
4499
4500 if (!bp->bnapi)
4501 return;
4502
4503 for (i = 0; i < bp->cp_nr_rings; i++) {
4504 struct bnxt_napi *bnapi = bp->bnapi[i];
4505 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4506 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4507
4508 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4509 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4510 }
4511 }
4512
4513 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4514 {
4515 struct bnxt_napi *bnapi = bp->bnapi[n];
4516 struct bnxt_cp_ring_info *cpr;
4517
4518 cpr = &bnapi->cp_ring;
4519 return cpr->cp_ring_struct.map_idx;
4520 }
4521
4522 static void bnxt_disable_int_sync(struct bnxt *bp)
4523 {
4524 int i;
4525
4526 if (!bp->irq_tbl)
4527 return;
4528
4529 atomic_inc(&bp->intr_sem);
4530
4531 bnxt_disable_int(bp);
4532 for (i = 0; i < bp->cp_nr_rings; i++) {
4533 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4534
4535 synchronize_irq(bp->irq_tbl[map_idx].vector);
4536 }
4537 }
4538
4539 static void bnxt_enable_int(struct bnxt *bp)
4540 {
4541 int i;
4542
4543 atomic_set(&bp->intr_sem, 0);
4544 for (i = 0; i < bp->cp_nr_rings; i++) {
4545 struct bnxt_napi *bnapi = bp->bnapi[i];
4546 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4547
4548 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4549 }
4550 }
4551
4552 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4553 u16 cmpl_ring, u16 target_id)
4554 {
4555 struct input *req = request;
4556
4557 req->req_type = cpu_to_le16(req_type);
4558 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4559 req->target_id = cpu_to_le16(target_id);
4560 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4561 }
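
/* Editorial note (illustrative sketch, not part of the upstream driver): a
 * typical HWRM call in this file zeroes a request structure on the stack,
 * initializes its header, and sends it while the shared response buffer is
 * protected by bp->hwrm_cmd_lock, e.g.:
 *
 *	struct hwrm_func_drv_unrgtr_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
 *	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *
 * bnxt_hwrm_func_drv_unrgtr() further below is a real instance of this
 * pattern.
 */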
4562
4563 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4564 {
4565 switch (hwrm_err) {
4566 case HWRM_ERR_CODE_SUCCESS:
4567 return 0;
4568 case HWRM_ERR_CODE_RESOURCE_LOCKED:
4569 return -EROFS;
4570 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4571 return -EACCES;
4572 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4573 return -ENOSPC;
4574 case HWRM_ERR_CODE_INVALID_PARAMS:
4575 case HWRM_ERR_CODE_INVALID_FLAGS:
4576 case HWRM_ERR_CODE_INVALID_ENABLES:
4577 case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4578 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4579 return -EINVAL;
4580 case HWRM_ERR_CODE_NO_BUFFER:
4581 return -ENOMEM;
4582 case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4583 case HWRM_ERR_CODE_BUSY:
4584 return -EAGAIN;
4585 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4586 return -EOPNOTSUPP;
4587 default:
4588 return -EIO;
4589 }
4590 }
4591
4592 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4593 int timeout, bool silent)
4594 {
4595 int i, intr_process, rc, tmo_count;
4596 struct input *req = msg;
4597 u32 *data = msg;
4598 u8 *valid;
4599 u16 cp_ring_id, len = 0;
4600 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4601 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4602 struct hwrm_short_input short_input = {0};
4603 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4604 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4605 u16 dst = BNXT_HWRM_CHNL_CHIMP;
4606
4607 if (BNXT_NO_FW_ACCESS(bp) &&
4608 le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
4609 return -EBUSY;
4610
4611 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4612 if (msg_len > bp->hwrm_max_ext_req_len ||
4613 !bp->hwrm_short_cmd_req_addr)
4614 return -EINVAL;
4615 }
4616
4617 if (bnxt_kong_hwrm_message(bp, req)) {
4618 dst = BNXT_HWRM_CHNL_KONG;
4619 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4620 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4621 }
4622
4623 memset(resp, 0, PAGE_SIZE);
4624 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4625 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4626
4627 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4628 /* currently supports only one outstanding message */
4629 if (intr_process)
4630 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4631
4632 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4633 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4634 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4635 u16 max_msg_len;
4636
4637 /* Set the boundary for the maximum extended request length for the
4638 * short cmd format. If passed up from the device, use the maximum
4639 * supported internal request length.
4640 */
4641 max_msg_len = bp->hwrm_max_ext_req_len;
4642
4643 memcpy(short_cmd_req, req, msg_len);
4644 if (msg_len < max_msg_len)
4645 memset(short_cmd_req + msg_len, 0,
4646 max_msg_len - msg_len);
4647
4648 short_input.req_type = req->req_type;
4649 short_input.signature =
4650 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4651 short_input.size = cpu_to_le16(msg_len);
4652 short_input.req_addr =
4653 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4654
4655 data = (u32 *)&short_input;
4656 msg_len = sizeof(short_input);
4657
4658 /* Sync memory write before updating doorbell */
4659 wmb();
4660
4661 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4662 }
4663
4664 /* Write request msg to hwrm channel */
4665 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4666
4667 for (i = msg_len; i < max_req_len; i += 4)
4668 writel(0, bp->bar0 + bar_offset + i);
4669
4670 /* Ring channel doorbell */
4671 writel(1, bp->bar0 + doorbell_offset);
4672
4673 if (!pci_is_enabled(bp->pdev))
4674 return -ENODEV;
4675
4676 if (!timeout)
4677 timeout = DFLT_HWRM_CMD_TIMEOUT;
4678 /* Cap the timeout at the maximum allowed value */
4679 timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT);
4680 /* convert timeout to usec */
4681 timeout *= 1000;
4682
4683 i = 0;
4684 /* Short timeout for the first few iterations:
4685 * number of loops = number of loops for short timeout +
4686 * number of loops for standard timeout.
4687 */
4688 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4689 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4690 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
4691
4692 if (intr_process) {
4693 u16 seq_id = bp->hwrm_intr_seq_id;
4694
4695 /* Wait until hwrm response cmpl interrupt is processed */
4696 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4697 i++ < tmo_count) {
4698 /* Abort the wait for completion if the FW health
4699 * check has failed.
4700 */
4701 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4702 return -EBUSY;
4703 /* on first few passes, just barely sleep */
4704 if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
4705 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4706 HWRM_SHORT_MAX_TIMEOUT);
4707 } else {
4708 if (HWRM_WAIT_MUST_ABORT(bp, req))
4709 break;
4710 usleep_range(HWRM_MIN_TIMEOUT,
4711 HWRM_MAX_TIMEOUT);
4712 }
4713 }
4714
4715 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4716 if (!silent)
4717 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4718 le16_to_cpu(req->req_type));
4719 return -EBUSY;
4720 }
4721 len = le16_to_cpu(resp->resp_len);
4722 valid = ((u8 *)resp) + len - 1;
4723 } else {
4724 int j;
4725
4726 /* Check if response len is updated */
4727 for (i = 0; i < tmo_count; i++) {
4728 /* Abort the wait for completion if the FW health
4729 * check has failed.
4730 */
4731 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4732 return -EBUSY;
4733 len = le16_to_cpu(resp->resp_len);
4734 if (len)
4735 break;
4736 /* on first few passes, just barely sleep */
4737 if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
4738 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4739 HWRM_SHORT_MAX_TIMEOUT);
4740 } else {
4741 if (HWRM_WAIT_MUST_ABORT(bp, req))
4742 goto timeout_abort;
4743 usleep_range(HWRM_MIN_TIMEOUT,
4744 HWRM_MAX_TIMEOUT);
4745 }
4746 }
4747
4748 if (i >= tmo_count) {
4749 timeout_abort:
4750 if (!silent)
4751 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4752 HWRM_TOTAL_TIMEOUT(i),
4753 le16_to_cpu(req->req_type),
4754 le16_to_cpu(req->seq_id), len);
4755 return -EBUSY;
4756 }
4757
4758 /* Last byte of resp contains valid bit */
4759 valid = ((u8 *)resp) + len - 1;
4760 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4761 /* make sure we read from updated DMA memory */
4762 dma_rmb();
4763 if (*valid)
4764 break;
4765 usleep_range(1, 5);
4766 }
4767
4768 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4769 if (!silent)
4770 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4771 HWRM_TOTAL_TIMEOUT(i),
4772 le16_to_cpu(req->req_type),
4773 le16_to_cpu(req->seq_id), len,
4774 *valid);
4775 return -EBUSY;
4776 }
4777 }
4778
4779 /* Zero the valid bit for compatibility. The valid bit in an older
4780 * spec may become a new field in a newer spec. We must make sure
4781 * that a new field not implemented by the old spec reads zero.
4782 */
4783 *valid = 0;
4784 rc = le16_to_cpu(resp->error_code);
4785 if (rc && !silent)
4786 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4787 le16_to_cpu(resp->req_type),
4788 le16_to_cpu(resp->seq_id), rc);
4789 return bnxt_hwrm_to_stderr(rc);
4790 }
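
/* Editorial note (not part of the upstream driver): bnxt_hwrm_do_send_msg()
 * waits for the firmware response in one of two ways.  If a completion ring
 * was supplied, it waits for the interrupt handler to write the inverted
 * sequence id into hwrm_intr_seq_id; otherwise it polls resp_len in the DMA
 * response buffer and then the trailing valid byte.  Both paths use
 * HWRM_SHORT_TIMEOUT_COUNTER short sleeps before switching to the standard
 * sleep interval.
 */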
4791
4792 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4793 {
4794 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4795 }
4796
4797 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4798 int timeout)
4799 {
4800 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4801 }
4802
4803 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4804 {
4805 int rc;
4806
4807 mutex_lock(&bp->hwrm_cmd_lock);
4808 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4809 mutex_unlock(&bp->hwrm_cmd_lock);
4810 return rc;
4811 }
4812
4813 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4814 int timeout)
4815 {
4816 int rc;
4817
4818 mutex_lock(&bp->hwrm_cmd_lock);
4819 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4820 mutex_unlock(&bp->hwrm_cmd_lock);
4821 return rc;
4822 }
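
/* Editorial note (not part of the upstream driver): the underscore-prefixed
 * variants above expect the caller to already hold bp->hwrm_cmd_lock, which
 * is required whenever the shared response buffer must be read after the
 * call returns, while hwrm_send_message() and hwrm_send_message_silent()
 * take and release the lock themselves.
 */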
4823
4824 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4825 bool async_only)
4826 {
4827 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4828 struct hwrm_func_drv_rgtr_input req = {0};
4829 DECLARE_BITMAP(async_events_bmap, 256);
4830 u32 *events = (u32 *)async_events_bmap;
4831 u32 flags;
4832 int rc, i;
4833
4834 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4835
4836 req.enables =
4837 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4838 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4839 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4840
4841 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4842 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4843 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4844 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4845 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4846 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4847 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4848 req.flags = cpu_to_le32(flags);
4849 req.ver_maj_8b = DRV_VER_MAJ;
4850 req.ver_min_8b = DRV_VER_MIN;
4851 req.ver_upd_8b = DRV_VER_UPD;
4852 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4853 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4854 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4855
4856 if (BNXT_PF(bp)) {
4857 u32 data[8];
4858 int i;
4859
4860 memset(data, 0, sizeof(data));
4861 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4862 u16 cmd = bnxt_vf_req_snif[i];
4863 unsigned int bit, idx;
4864
4865 idx = cmd / 32;
4866 bit = cmd % 32;
4867 data[idx] |= 1 << bit;
4868 }
4869
4870 for (i = 0; i < 8; i++)
4871 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4872
4873 req.enables |=
4874 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4875 }
4876
4877 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4878 req.flags |= cpu_to_le32(
4879 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4880
4881 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4882 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4883 u16 event_id = bnxt_async_events_arr[i];
4884
4885 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4886 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4887 continue;
4888 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4889 }
4890 if (bmap && bmap_size) {
4891 for (i = 0; i < bmap_size; i++) {
4892 if (test_bit(i, bmap))
4893 __set_bit(i, async_events_bmap);
4894 }
4895 }
4896 for (i = 0; i < 8; i++)
4897 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4898
4899 if (async_only)
4900 req.enables =
4901 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4902
4903 mutex_lock(&bp->hwrm_cmd_lock);
4904 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4905 if (!rc) {
4906 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4907 if (resp->flags &
4908 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4909 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4910 }
4911 mutex_unlock(&bp->hwrm_cmd_lock);
4912 return rc;
4913 }
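
/* Editorial note (not part of the upstream driver): vf_req_fwd appears to be
 * a 256-bit bitmap of HWRM command IDs that the PF driver asks the firmware
 * to forward from its VFs.  Each command ID maps to word cmd / 32, bit
 * cmd % 32; a hypothetical command ID of 66 would set bit 2 of data[2].
 */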
4914
4915 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4916 {
4917 struct hwrm_func_drv_unrgtr_input req = {0};
4918
4919 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4920 return 0;
4921
4922 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4923 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4924 }
4925
4926 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4927 {
4928 u32 rc = 0;
4929 struct hwrm_tunnel_dst_port_free_input req = {0};
4930
4931 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4932 req.tunnel_type = tunnel_type;
4933
4934 switch (tunnel_type) {
4935 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4936 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4937 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4938 break;
4939 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4940 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4941 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4942 break;
4943 default:
4944 break;
4945 }
4946
4947 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4948 if (rc)
4949 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4950 rc);
4951 return rc;
4952 }
4953
4954 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4955 u8 tunnel_type)
4956 {
4957 u32 rc = 0;
4958 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4959 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4960
4961 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4962
4963 req.tunnel_type = tunnel_type;
4964 req.tunnel_dst_port_val = port;
4965
4966 mutex_lock(&bp->hwrm_cmd_lock);
4967 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4968 if (rc) {
4969 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4970 rc);
4971 goto err_out;
4972 }
4973
4974 switch (tunnel_type) {
4975 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4976 bp->vxlan_fw_dst_port_id =
4977 le16_to_cpu(resp->tunnel_dst_port_id);
4978 break;
4979 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4980 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4981 break;
4982 default:
4983 break;
4984 }
4985
4986 err_out:
4987 mutex_unlock(&bp->hwrm_cmd_lock);
4988 return rc;
4989 }
4990
4991 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4992 {
4993 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4994 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4995
4996 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4997 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4998
4999 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
5000 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
5001 req.mask = cpu_to_le32(vnic->rx_mask);
5002 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5003 }
5004
5005 #ifdef CONFIG_RFS_ACCEL
5006 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
5007 struct bnxt_ntuple_filter *fltr)
5008 {
5009 struct hwrm_cfa_ntuple_filter_free_input req = {0};
5010
5011 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
5012 req.ntuple_filter_id = fltr->filter_id;
5013 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5014 }
5015
5016 #define BNXT_NTP_FLTR_FLAGS \
5017 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
5018 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
5019 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
5020 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
5021 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
5022 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
5023 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
5024 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
5025 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
5026 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
5027 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
5028 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
5029 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
5030 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
5031
5032 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
5033 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
5034
5035 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
5036 struct bnxt_ntuple_filter *fltr)
5037 {
5038 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
5039 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
5040 struct flow_keys *keys = &fltr->fkeys;
5041 struct bnxt_vnic_info *vnic;
5042 u32 flags = 0;
5043 int rc = 0;
5044
5045 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
5046 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
5047
5048 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
5049 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
5050 req.dst_id = cpu_to_le16(fltr->rxq);
5051 } else {
5052 vnic = &bp->vnic_info[fltr->rxq + 1];
5053 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
5054 }
5055 req.flags = cpu_to_le32(flags);
5056 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
5057
5058 req.ethertype = htons(ETH_P_IP);
5059 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
5060 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
5061 req.ip_protocol = keys->basic.ip_proto;
5062
5063 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
5064 int i;
5065
5066 req.ethertype = htons(ETH_P_IPV6);
5067 req.ip_addr_type =
5068 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
5069 *(struct in6_addr *)&req.src_ipaddr[0] =
5070 keys->addrs.v6addrs.src;
5071 *(struct in6_addr *)&req.dst_ipaddr[0] =
5072 keys->addrs.v6addrs.dst;
5073 for (i = 0; i < 4; i++) {
5074 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
5075 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
5076 }
5077 } else {
5078 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
5079 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
5080 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
5081 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
5082 }
5083 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
5084 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
5085 req.tunnel_type =
5086 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
5087 }
5088
5089 req.src_port = keys->ports.src;
5090 req.src_port_mask = cpu_to_be16(0xffff);
5091 req.dst_port = keys->ports.dst;
5092 req.dst_port_mask = cpu_to_be16(0xffff);
5093
5094 mutex_lock(&bp->hwrm_cmd_lock);
5095 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5096 if (!rc) {
5097 resp = bnxt_get_hwrm_resp_addr(bp, &req);
5098 fltr->filter_id = resp->ntuple_filter_id;
5099 }
5100 mutex_unlock(&bp->hwrm_cmd_lock);
5101 return rc;
5102 }
5103 #endif
5104
5105 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5106 u8 *mac_addr)
5107 {
5108 u32 rc = 0;
5109 struct hwrm_cfa_l2_filter_alloc_input req = {0};
5110 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5111
5112 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
5113 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5114 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5115 req.flags |=
5116 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
5117 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
5118 req.enables =
5119 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5120 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5121 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5122 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
5123 req.l2_addr_mask[0] = 0xff;
5124 req.l2_addr_mask[1] = 0xff;
5125 req.l2_addr_mask[2] = 0xff;
5126 req.l2_addr_mask[3] = 0xff;
5127 req.l2_addr_mask[4] = 0xff;
5128 req.l2_addr_mask[5] = 0xff;
5129
5130 mutex_lock(&bp->hwrm_cmd_lock);
5131 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5132 if (!rc)
5133 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5134 resp->l2_filter_id;
5135 mutex_unlock(&bp->hwrm_cmd_lock);
5136 return rc;
5137 }
5138
5139 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5140 {
5141 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5142 int rc = 0;
5143
5144 /* Any associated ntuple filters will also be cleared by firmware. */
5145 mutex_lock(&bp->hwrm_cmd_lock);
5146 for (i = 0; i < num_of_vnics; i++) {
5147 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5148
5149 for (j = 0; j < vnic->uc_filter_count; j++) {
5150 struct hwrm_cfa_l2_filter_free_input req = {0};
5151
5152 bnxt_hwrm_cmd_hdr_init(bp, &req,
5153 HWRM_CFA_L2_FILTER_FREE, -1, -1);
5154
5155 req.l2_filter_id = vnic->fw_l2_filter_id[j];
5156
5157 rc = _hwrm_send_message(bp, &req, sizeof(req),
5158 HWRM_CMD_TIMEOUT);
5159 }
5160 vnic->uc_filter_count = 0;
5161 }
5162 mutex_unlock(&bp->hwrm_cmd_lock);
5163
5164 return rc;
5165 }
5166
5167 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5168 {
5169 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5170 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
5171 struct hwrm_vnic_tpa_cfg_input req = {0};
5172
5173 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5174 return 0;
5175
5176 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
5177
5178 if (tpa_flags) {
5179 u16 mss = bp->dev->mtu - 40;
5180 u32 nsegs, n, segs = 0, flags;
5181
5182 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5183 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5184 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5185 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5186 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5187 if (tpa_flags & BNXT_FLAG_GRO)
5188 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5189
5190 req.flags = cpu_to_le32(flags);
5191
5192 req.enables =
5193 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5194 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5195 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5196
5197 /* The number of aggregation segments is expressed in log2 units,
5198 * and the first packet is not counted in these units.
5199 */
5200 if (mss <= BNXT_RX_PAGE_SIZE) {
5201 n = BNXT_RX_PAGE_SIZE / mss;
5202 nsegs = (MAX_SKB_FRAGS - 1) * n;
5203 } else {
5204 n = mss / BNXT_RX_PAGE_SIZE;
5205 if (mss & (BNXT_RX_PAGE_SIZE - 1))
5206 n++;
5207 nsegs = (MAX_SKB_FRAGS - n) / n;
5208 }
5209
5210 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5211 segs = MAX_TPA_SEGS_P5;
5212 max_aggs = bp->max_tpa;
5213 } else {
5214 segs = ilog2(nsegs);
5215 }
5216 req.max_agg_segs = cpu_to_le16(segs);
5217 req.max_aggs = cpu_to_le16(max_aggs);
5218
5219 req.min_agg_len = cpu_to_le32(512);
5220 }
5221 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5222
5223 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5224 }
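
/* Editorial note (illustrative only, not part of the upstream driver):
 * assuming BNXT_RX_PAGE_SIZE = 4096 and MAX_SKB_FRAGS = 17 (both
 * hypothetical values here), a 1500-byte MTU gives mss = 1460,
 * n = 4096 / 1460 = 2 and nsegs = (17 - 1) * 2 = 32, so pre-P5 chips get
 * max_agg_segs = ilog2(32) = 5, while P5 chips use MAX_TPA_SEGS_P5 and
 * bp->max_tpa instead.
 */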
5225
5226 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5227 {
5228 struct bnxt_ring_grp_info *grp_info;
5229
5230 grp_info = &bp->grp_info[ring->grp_idx];
5231 return grp_info->cp_fw_ring_id;
5232 }
5233
5234 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5235 {
5236 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5237 struct bnxt_napi *bnapi = rxr->bnapi;
5238 struct bnxt_cp_ring_info *cpr;
5239
5240 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5241 return cpr->cp_ring_struct.fw_ring_id;
5242 } else {
5243 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5244 }
5245 }
5246
5247 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5248 {
5249 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5250 struct bnxt_napi *bnapi = txr->bnapi;
5251 struct bnxt_cp_ring_info *cpr;
5252
5253 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5254 return cpr->cp_ring_struct.fw_ring_id;
5255 } else {
5256 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5257 }
5258 }
5259
5260 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5261 {
5262 int entries;
5263
5264 if (bp->flags & BNXT_FLAG_CHIP_P5)
5265 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5266 else
5267 entries = HW_HASH_INDEX_SIZE;
5268
5269 bp->rss_indir_tbl_entries = entries;
5270 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5271 GFP_KERNEL);
5272 if (!bp->rss_indir_tbl)
5273 return -ENOMEM;
5274 return 0;
5275 }
5276
5277 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5278 {
5279 u16 max_rings, max_entries, pad, i;
5280
5281 if (!bp->rx_nr_rings)
5282 return;
5283
5284 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5285 max_rings = bp->rx_nr_rings - 1;
5286 else
5287 max_rings = bp->rx_nr_rings;
5288
5289 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5290
5291 for (i = 0; i < max_entries; i++)
5292 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5293
5294 pad = bp->rss_indir_tbl_entries - max_entries;
5295 if (pad)
5296 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5297 }
5298
5299 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5300 {
5301 u16 i, tbl_size, max_ring = 0;
5302
5303 if (!bp->rss_indir_tbl)
5304 return 0;
5305
5306 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5307 for (i = 0; i < tbl_size; i++)
5308 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5309 return max_ring;
5310 }
5311
5312 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5313 {
5314 if (bp->flags & BNXT_FLAG_CHIP_P5)
5315 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5316 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5317 return 2;
5318 return 1;
5319 }
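
/* Editorial note (illustrative only, not part of the upstream driver): on P5
 * chips each RSS context covers up to BNXT_RSS_TABLE_ENTRIES_P5 RX rings, so
 * assuming that constant is 64, a device with 10 RX rings needs
 * DIV_ROUND_UP(10, 64) = 1 context; Nitro A0 always uses 2 contexts and all
 * other chips use 1.
 */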
5320
5321 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5322 {
5323 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5324 u16 i, j;
5325
5326 /* Fill the RSS indirection table with ring group ids */
5327 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5328 if (!no_rss)
5329 j = bp->rss_indir_tbl[i];
5330 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5331 }
5332 }
5333
5334 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5335 struct bnxt_vnic_info *vnic)
5336 {
5337 __le16 *ring_tbl = vnic->rss_table;
5338 struct bnxt_rx_ring_info *rxr;
5339 u16 tbl_size, i;
5340
5341 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5342
5343 for (i = 0; i < tbl_size; i++) {
5344 u16 ring_id, j;
5345
5346 j = bp->rss_indir_tbl[i];
5347 rxr = &bp->rx_ring[j];
5348
5349 ring_id = rxr->rx_ring_struct.fw_ring_id;
5350 *ring_tbl++ = cpu_to_le16(ring_id);
5351 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5352 *ring_tbl++ = cpu_to_le16(ring_id);
5353 }
5354 }
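
/* Editorial note (not part of the upstream driver): on P5 chips each RSS
 * indirection entry is written as a pair, the RX ring's firmware ring ID
 * followed by the ID of its completion ring, which is why rss_table
 * consumes two __le16 slots per entry here instead of the single ring
 * group ID written by __bnxt_fill_hw_rss_tbl() above.
 */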
5355
5356 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5357 {
5358 if (bp->flags & BNXT_FLAG_CHIP_P5)
5359 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5360 else
5361 __bnxt_fill_hw_rss_tbl(bp, vnic);
5362 }
5363
5364 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5365 {
5366 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5367 struct hwrm_vnic_rss_cfg_input req = {0};
5368
5369 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5370 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5371 return 0;
5372
5373 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5374 if (set_rss) {
5375 bnxt_fill_hw_rss_tbl(bp, vnic);
5376 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5377 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5378 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5379 req.hash_key_tbl_addr =
5380 cpu_to_le64(vnic->rss_hash_key_dma_addr);
5381 }
5382 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5383 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5384 }
5385
5386 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5387 {
5388 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5389 struct hwrm_vnic_rss_cfg_input req = {0};
5390 dma_addr_t ring_tbl_map;
5391 u32 i, nr_ctxs;
5392
5393 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5394 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5395 if (!set_rss) {
5396 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5397 return 0;
5398 }
5399 bnxt_fill_hw_rss_tbl(bp, vnic);
5400 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5401 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5402 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5403 ring_tbl_map = vnic->rss_table_dma_addr;
5404 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5405 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5406 int rc;
5407
5408 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5409 req.ring_table_pair_index = i;
5410 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5411 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5412 if (rc)
5413 return rc;
5414 }
5415 return 0;
5416 }
5417
5418 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5419 {
5420 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5421 struct hwrm_vnic_plcmodes_cfg_input req = {0};
5422
5423 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5424 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5425 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5426 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5427 req.enables =
5428 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5429 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5430 /* thresholds not implemented in firmware yet */
5431 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5432 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5433 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5434 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5435 }
5436
5437 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5438 u16 ctx_idx)
5439 {
5440 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5441
5442 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5443 req.rss_cos_lb_ctx_id =
5444 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5445
5446 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5447 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5448 }
5449
5450 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5451 {
5452 int i, j;
5453
5454 for (i = 0; i < bp->nr_vnics; i++) {
5455 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5456
5457 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5458 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5459 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5460 }
5461 }
5462 bp->rsscos_nr_ctxs = 0;
5463 }
5464
5465 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5466 {
5467 int rc;
5468 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5469 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5470 bp->hwrm_cmd_resp_addr;
5471
5472 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5473 -1);
5474
5475 mutex_lock(&bp->hwrm_cmd_lock);
5476 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5477 if (!rc)
5478 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5479 le16_to_cpu(resp->rss_cos_lb_ctx_id);
5480 mutex_unlock(&bp->hwrm_cmd_lock);
5481
5482 return rc;
5483 }
5484
5485 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5486 {
5487 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5488 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5489 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5490 }
5491
5492 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5493 {
5494 unsigned int ring = 0, grp_idx;
5495 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5496 struct hwrm_vnic_cfg_input req = {0};
5497 u16 def_vlan = 0;
5498
5499 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5500
5501 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5502 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5503
5504 req.default_rx_ring_id =
5505 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5506 req.default_cmpl_ring_id =
5507 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5508 req.enables =
5509 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5510 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5511 goto vnic_mru;
5512 }
5513 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5514 /* Only RSS is supported for now; COS & LB are TBD */
5515 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5516 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5517 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5518 VNIC_CFG_REQ_ENABLES_MRU);
5519 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5520 req.rss_rule =
5521 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5522 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5523 VNIC_CFG_REQ_ENABLES_MRU);
5524 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5525 } else {
5526 req.rss_rule = cpu_to_le16(0xffff);
5527 }
5528
5529 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5530 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5531 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5532 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5533 } else {
5534 req.cos_rule = cpu_to_le16(0xffff);
5535 }
5536
5537 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5538 ring = 0;
5539 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5540 ring = vnic_id - 1;
5541 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5542 ring = bp->rx_nr_rings - 1;
5543
5544 grp_idx = bp->rx_ring[ring].bnapi->index;
5545 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5546 req.lb_rule = cpu_to_le16(0xffff);
5547 vnic_mru:
5548 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5549
5550 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5551 #ifdef CONFIG_BNXT_SRIOV
5552 if (BNXT_VF(bp))
5553 def_vlan = bp->vf.vlan;
5554 #endif
5555 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5556 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5557 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5558 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5559
5560 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5561 }
5562
5563 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5564 {
5565 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5566 struct hwrm_vnic_free_input req = {0};
5567
5568 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5569 req.vnic_id =
5570 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5571
5572 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5573 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5574 }
5575 }
5576
5577 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5578 {
5579 u16 i;
5580
5581 for (i = 0; i < bp->nr_vnics; i++)
5582 bnxt_hwrm_vnic_free_one(bp, i);
5583 }
5584
5585 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5586 unsigned int start_rx_ring_idx,
5587 unsigned int nr_rings)
5588 {
5589 int rc = 0;
5590 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5591 struct hwrm_vnic_alloc_input req = {0};
5592 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5593 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5594
5595 if (bp->flags & BNXT_FLAG_CHIP_P5)
5596 goto vnic_no_ring_grps;
5597
5598 /* map ring groups to this vnic */
5599 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5600 grp_idx = bp->rx_ring[i].bnapi->index;
5601 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5602 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5603 j, nr_rings);
5604 break;
5605 }
5606 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5607 }
5608
5609 vnic_no_ring_grps:
5610 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5611 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5612 if (vnic_id == 0)
5613 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5614
5615 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5616
5617 mutex_lock(&bp->hwrm_cmd_lock);
5618 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5619 if (!rc)
5620 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5621 mutex_unlock(&bp->hwrm_cmd_lock);
5622 return rc;
5623 }
5624
5625 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5626 {
5627 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5628 struct hwrm_vnic_qcaps_input req = {0};
5629 int rc;
5630
5631 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5632 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5633 if (bp->hwrm_spec_code < 0x10600)
5634 return 0;
5635
5636 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5637 mutex_lock(&bp->hwrm_cmd_lock);
5638 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5639 if (!rc) {
5640 u32 flags = le32_to_cpu(resp->flags);
5641
5642 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5643 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5644 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5645 if (flags &
5646 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5647 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5648
5649 /* Older P5 fw before EXT_HW_STATS support did not set
5650 * VLAN_STRIP_CAP properly.
5651 */
5652 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5653 (BNXT_CHIP_P5_THOR(bp) &&
5654 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5655 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5656 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5657 if (bp->max_tpa_v2) {
5658 if (BNXT_CHIP_P5_THOR(bp))
5659 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5660 else
5661 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5662 }
5663 }
5664 mutex_unlock(&bp->hwrm_cmd_lock);
5665 return rc;
5666 }
5667
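/* Allocate one firmware ring group per RX ring and store the returned
 * group IDs. P5 chips do not use ring groups, so this is a no-op there.
 */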
5668 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5669 {
5670 u16 i;
5671 int rc = 0;
5672
5673 if (bp->flags & BNXT_FLAG_CHIP_P5)
5674 return 0;
5675
5676 mutex_lock(&bp->hwrm_cmd_lock);
5677 for (i = 0; i < bp->rx_nr_rings; i++) {
5678 struct hwrm_ring_grp_alloc_input req = {0};
5679 struct hwrm_ring_grp_alloc_output *resp =
5680 bp->hwrm_cmd_resp_addr;
5681 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5682
5683 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5684
5685 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5686 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5687 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5688 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5689
5690 rc = _hwrm_send_message(bp, &req, sizeof(req),
5691 HWRM_CMD_TIMEOUT);
5692 if (rc)
5693 break;
5694
5695 bp->grp_info[grp_idx].fw_grp_id =
5696 le32_to_cpu(resp->ring_group_id);
5697 }
5698 mutex_unlock(&bp->hwrm_cmd_lock);
5699 return rc;
5700 }
5701
5702 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5703 {
5704 u16 i;
5705 struct hwrm_ring_grp_free_input req = {0};
5706
5707 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5708 return;
5709
5710 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5711
5712 mutex_lock(&bp->hwrm_cmd_lock);
5713 for (i = 0; i < bp->cp_nr_rings; i++) {
5714 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5715 continue;
5716 req.ring_group_id =
5717 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5718
5719 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5720 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5721 }
5722 mutex_unlock(&bp->hwrm_cmd_lock);
5723 }
5724
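/* Send HWRM_RING_ALLOC for a single TX, RX, AGG, CMPL or NQ ring and
 * save the firmware ring ID returned in the response.
 */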
5725 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5726 struct bnxt_ring_struct *ring,
5727 u32 ring_type, u32 map_index)
5728 {
5729 int rc = 0, err = 0;
5730 struct hwrm_ring_alloc_input req = {0};
5731 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5732 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5733 struct bnxt_ring_grp_info *grp_info;
5734 u16 ring_id;
5735
5736 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5737
5738 req.enables = 0;
5739 if (rmem->nr_pages > 1) {
5740 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5741 /* Page size is in log2 units */
5742 req.page_size = BNXT_PAGE_SHIFT;
5743 req.page_tbl_depth = 1;
5744 } else {
5745 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5746 }
5747 req.fbo = 0;
5748 /* Association of ring index with doorbell index and MSIX number */
5749 req.logical_id = cpu_to_le16(map_index);
5750
5751 switch (ring_type) {
5752 case HWRM_RING_ALLOC_TX: {
5753 struct bnxt_tx_ring_info *txr;
5754
5755 txr = container_of(ring, struct bnxt_tx_ring_info,
5756 tx_ring_struct);
5757 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5758 /* Association of transmit ring with completion ring */
5759 grp_info = &bp->grp_info[ring->grp_idx];
5760 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5761 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5762 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5763 req.queue_id = cpu_to_le16(ring->queue_id);
5764 break;
5765 }
5766 case HWRM_RING_ALLOC_RX:
5767 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5768 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5769 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5770 u16 flags = 0;
5771
5772 /* Association of rx ring with stats context */
5773 grp_info = &bp->grp_info[ring->grp_idx];
5774 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5775 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5776 req.enables |= cpu_to_le32(
5777 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5778 if (NET_IP_ALIGN == 2)
5779 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5780 req.flags = cpu_to_le16(flags);
5781 }
5782 break;
5783 case HWRM_RING_ALLOC_AGG:
5784 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5785 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5786 /* Association of agg ring with rx ring */
5787 grp_info = &bp->grp_info[ring->grp_idx];
5788 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5789 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5790 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5791 req.enables |= cpu_to_le32(
5792 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5793 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5794 } else {
5795 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5796 }
5797 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5798 break;
5799 case HWRM_RING_ALLOC_CMPL:
5800 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5801 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5802 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5803 /* Association of cp ring with nq */
5804 grp_info = &bp->grp_info[map_index];
5805 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5806 req.cq_handle = cpu_to_le64(ring->handle);
5807 req.enables |= cpu_to_le32(
5808 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5809 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5810 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5811 }
5812 break;
5813 case HWRM_RING_ALLOC_NQ:
5814 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5815 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5816 if (bp->flags & BNXT_FLAG_USING_MSIX)
5817 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5818 break;
5819 default:
5820 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5821 ring_type);
5822 return -EINVAL;
5823 }
5824
5825 mutex_lock(&bp->hwrm_cmd_lock);
5826 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5827 err = le16_to_cpu(resp->error_code);
5828 ring_id = le16_to_cpu(resp->ring_id);
5829 mutex_unlock(&bp->hwrm_cmd_lock);
5830
5831 if (rc || err) {
5832 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5833 ring_type, rc, err);
5834 return -EIO;
5835 }
5836 ring->fw_ring_id = ring_id;
5837 return rc;
5838 }
5839
5840 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5841 {
5842 int rc;
5843
5844 if (BNXT_PF(bp)) {
5845 struct hwrm_func_cfg_input req = {0};
5846
5847 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5848 req.fid = cpu_to_le16(0xffff);
5849 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5850 req.async_event_cr = cpu_to_le16(idx);
5851 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5852 } else {
5853 struct hwrm_func_vf_cfg_input req = {0};
5854
5855 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5856 req.enables =
5857 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5858 req.async_event_cr = cpu_to_le16(idx);
5859 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5860 }
5861 return rc;
5862 }
5863
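/* Initialize the doorbell address and key for a ring. P5 chips use
 * 64-bit doorbells at a fixed PF/VF offset; legacy chips use 32-bit
 * doorbells spaced 0x80 apart per ring index.
 */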
5864 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5865 u32 map_idx, u32 xid)
5866 {
5867 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5868 if (BNXT_PF(bp))
5869 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5870 else
5871 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5872 switch (ring_type) {
5873 case HWRM_RING_ALLOC_TX:
5874 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5875 break;
5876 case HWRM_RING_ALLOC_RX:
5877 case HWRM_RING_ALLOC_AGG:
5878 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5879 break;
5880 case HWRM_RING_ALLOC_CMPL:
5881 db->db_key64 = DBR_PATH_L2;
5882 break;
5883 case HWRM_RING_ALLOC_NQ:
5884 db->db_key64 = DBR_PATH_L2;
5885 break;
5886 }
5887 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5888 } else {
5889 db->doorbell = bp->bar1 + map_idx * 0x80;
5890 switch (ring_type) {
5891 case HWRM_RING_ALLOC_TX:
5892 db->db_key32 = DB_KEY_TX;
5893 break;
5894 case HWRM_RING_ALLOC_RX:
5895 case HWRM_RING_ALLOC_AGG:
5896 db->db_key32 = DB_KEY_RX;
5897 break;
5898 case HWRM_RING_ALLOC_CMPL:
5899 db->db_key32 = DB_KEY_CP;
5900 break;
5901 }
5902 }
5903 }
5904
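/* Allocate all firmware rings: NQ/completion rings first (with the IRQ
 * briefly disabled while each ring is created, and the first ring
 * registered for async events), then TX, RX and aggregation rings,
 * setting up the doorbells as each ring is created.
 */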
5905 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5906 {
5907 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5908 int i, rc = 0;
5909 u32 type;
5910
5911 if (bp->flags & BNXT_FLAG_CHIP_P5)
5912 type = HWRM_RING_ALLOC_NQ;
5913 else
5914 type = HWRM_RING_ALLOC_CMPL;
5915 for (i = 0; i < bp->cp_nr_rings; i++) {
5916 struct bnxt_napi *bnapi = bp->bnapi[i];
5917 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5918 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5919 u32 map_idx = ring->map_idx;
5920 unsigned int vector;
5921
5922 vector = bp->irq_tbl[map_idx].vector;
5923 disable_irq_nosync(vector);
5924 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5925 if (rc) {
5926 enable_irq(vector);
5927 goto err_out;
5928 }
5929 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5930 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5931 enable_irq(vector);
5932 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5933
5934 if (!i) {
5935 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5936 if (rc)
5937 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5938 }
5939 }
5940
5941 type = HWRM_RING_ALLOC_TX;
5942 for (i = 0; i < bp->tx_nr_rings; i++) {
5943 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5944 struct bnxt_ring_struct *ring;
5945 u32 map_idx;
5946
5947 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5948 struct bnxt_napi *bnapi = txr->bnapi;
5949 struct bnxt_cp_ring_info *cpr, *cpr2;
5950 u32 type2 = HWRM_RING_ALLOC_CMPL;
5951
5952 cpr = &bnapi->cp_ring;
5953 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5954 ring = &cpr2->cp_ring_struct;
5955 ring->handle = BNXT_TX_HDL;
5956 map_idx = bnapi->index;
5957 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5958 if (rc)
5959 goto err_out;
5960 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5961 ring->fw_ring_id);
5962 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5963 }
5964 ring = &txr->tx_ring_struct;
5965 map_idx = i;
5966 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5967 if (rc)
5968 goto err_out;
5969 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5970 }
5971
5972 type = HWRM_RING_ALLOC_RX;
5973 for (i = 0; i < bp->rx_nr_rings; i++) {
5974 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5975 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5976 struct bnxt_napi *bnapi = rxr->bnapi;
5977 u32 map_idx = bnapi->index;
5978
5979 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5980 if (rc)
5981 goto err_out;
5982 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5983 /* If we have agg rings, the rx doorbell is written only after the agg buffers are posted below. */
5984 if (!agg_rings)
5985 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5986 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5987 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5988 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5989 u32 type2 = HWRM_RING_ALLOC_CMPL;
5990 struct bnxt_cp_ring_info *cpr2;
5991
5992 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5993 ring = &cpr2->cp_ring_struct;
5994 ring->handle = BNXT_RX_HDL;
5995 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5996 if (rc)
5997 goto err_out;
5998 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5999 ring->fw_ring_id);
6000 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
6001 }
6002 }
6003
6004 if (agg_rings) {
6005 type = HWRM_RING_ALLOC_AGG;
6006 for (i = 0; i < bp->rx_nr_rings; i++) {
6007 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6008 struct bnxt_ring_struct *ring =
6009 &rxr->rx_agg_ring_struct;
6010 u32 grp_idx = ring->grp_idx;
6011 u32 map_idx = grp_idx + bp->rx_nr_rings;
6012
6013 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
6014 if (rc)
6015 goto err_out;
6016
6017 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
6018 ring->fw_ring_id);
6019 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
6020 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
6021 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
6022 }
6023 }
6024 err_out:
6025 return rc;
6026 }
6027
6028 static int hwrm_ring_free_send_msg(struct bnxt *bp,
6029 struct bnxt_ring_struct *ring,
6030 u32 ring_type, int cmpl_ring_id)
6031 {
6032 int rc;
6033 struct hwrm_ring_free_input req = {0};
6034 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
6035 u16 error_code;
6036
6037 if (BNXT_NO_FW_ACCESS(bp))
6038 return 0;
6039
6040 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
6041 req.ring_type = ring_type;
6042 req.ring_id = cpu_to_le16(ring->fw_ring_id);
6043
6044 mutex_lock(&bp->hwrm_cmd_lock);
6045 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6046 error_code = le16_to_cpu(resp->error_code);
6047 mutex_unlock(&bp->hwrm_cmd_lock);
6048
6049 if (rc || error_code) {
6050 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
6051 ring_type, rc, error_code);
6052 return -EIO;
6053 }
6054 return 0;
6055 }
6056
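/* Free all firmware rings: TX, RX and aggregation rings first, then the
 * completion/NQ rings after disabling interrupts, since the IRQ
 * doorbells stop working once the completion rings are freed.
 */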
6057 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
6058 {
6059 u32 type;
6060 int i;
6061
6062 if (!bp->bnapi)
6063 return;
6064
6065 for (i = 0; i < bp->tx_nr_rings; i++) {
6066 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6067 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
6068
6069 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6070 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
6071
6072 hwrm_ring_free_send_msg(bp, ring,
6073 RING_FREE_REQ_RING_TYPE_TX,
6074 close_path ? cmpl_ring_id :
6075 INVALID_HW_RING_ID);
6076 ring->fw_ring_id = INVALID_HW_RING_ID;
6077 }
6078 }
6079
6080 for (i = 0; i < bp->rx_nr_rings; i++) {
6081 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6082 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6083 u32 grp_idx = rxr->bnapi->index;
6084
6085 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6086 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6087
6088 hwrm_ring_free_send_msg(bp, ring,
6089 RING_FREE_REQ_RING_TYPE_RX,
6090 close_path ? cmpl_ring_id :
6091 INVALID_HW_RING_ID);
6092 ring->fw_ring_id = INVALID_HW_RING_ID;
6093 bp->grp_info[grp_idx].rx_fw_ring_id =
6094 INVALID_HW_RING_ID;
6095 }
6096 }
6097
6098 if (bp->flags & BNXT_FLAG_CHIP_P5)
6099 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
6100 else
6101 type = RING_FREE_REQ_RING_TYPE_RX;
6102 for (i = 0; i < bp->rx_nr_rings; i++) {
6103 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6104 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
6105 u32 grp_idx = rxr->bnapi->index;
6106
6107 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6108 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6109
6110 hwrm_ring_free_send_msg(bp, ring, type,
6111 close_path ? cmpl_ring_id :
6112 INVALID_HW_RING_ID);
6113 ring->fw_ring_id = INVALID_HW_RING_ID;
6114 bp->grp_info[grp_idx].agg_fw_ring_id =
6115 INVALID_HW_RING_ID;
6116 }
6117 }
6118
6119 /* The completion rings are about to be freed. After that the
6120 * IRQ doorbell will not work anymore. So we need to disable
6121 * IRQ here.
6122 */
6123 bnxt_disable_int_sync(bp);
6124
6125 if (bp->flags & BNXT_FLAG_CHIP_P5)
6126 type = RING_FREE_REQ_RING_TYPE_NQ;
6127 else
6128 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
6129 for (i = 0; i < bp->cp_nr_rings; i++) {
6130 struct bnxt_napi *bnapi = bp->bnapi[i];
6131 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6132 struct bnxt_ring_struct *ring;
6133 int j;
6134
6135 for (j = 0; j < 2; j++) {
6136 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
6137
6138 if (cpr2) {
6139 ring = &cpr2->cp_ring_struct;
6140 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6141 continue;
6142 hwrm_ring_free_send_msg(bp, ring,
6143 RING_FREE_REQ_RING_TYPE_L2_CMPL,
6144 INVALID_HW_RING_ID);
6145 ring->fw_ring_id = INVALID_HW_RING_ID;
6146 }
6147 }
6148 ring = &cpr->cp_ring_struct;
6149 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6150 hwrm_ring_free_send_msg(bp, ring, type,
6151 INVALID_HW_RING_ID);
6152 ring->fw_ring_id = INVALID_HW_RING_ID;
6153 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
6154 }
6155 }
6156 }
6157
6158 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6159 bool shared);
6160
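/* Query the firmware (HWRM_FUNC_QCFG) for the resources currently
 * reserved for this function and update bp->hw_resc. On P5 chips the
 * RX/TX ring counts are trimmed if fewer completion rings were reserved.
 */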
6161 static int bnxt_hwrm_get_rings(struct bnxt *bp)
6162 {
6163 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6164 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6165 struct hwrm_func_qcfg_input req = {0};
6166 int rc;
6167
6168 if (bp->hwrm_spec_code < 0x10601)
6169 return 0;
6170
6171 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6172 req.fid = cpu_to_le16(0xffff);
6173 mutex_lock(&bp->hwrm_cmd_lock);
6174 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6175 if (rc) {
6176 mutex_unlock(&bp->hwrm_cmd_lock);
6177 return rc;
6178 }
6179
6180 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6181 if (BNXT_NEW_RM(bp)) {
6182 u16 cp, stats;
6183
6184 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6185 hw_resc->resv_hw_ring_grps =
6186 le32_to_cpu(resp->alloc_hw_ring_grps);
6187 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6188 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6189 stats = le16_to_cpu(resp->alloc_stat_ctx);
6190 hw_resc->resv_irqs = cp;
6191 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6192 int rx = hw_resc->resv_rx_rings;
6193 int tx = hw_resc->resv_tx_rings;
6194
6195 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6196 rx >>= 1;
6197 if (cp < (rx + tx)) {
6198 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6199 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6200 rx <<= 1;
6201 hw_resc->resv_rx_rings = rx;
6202 hw_resc->resv_tx_rings = tx;
6203 }
6204 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6205 hw_resc->resv_hw_ring_grps = rx;
6206 }
6207 hw_resc->resv_cp_rings = cp;
6208 hw_resc->resv_stat_ctxs = stats;
6209 }
6210 mutex_unlock(&bp->hwrm_cmd_lock);
6211 return 0;
6212 }
6213
6214 /* Caller must hold bp->hwrm_cmd_lock */
6215 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6216 {
6217 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6218 struct hwrm_func_qcfg_input req = {0};
6219 int rc;
6220
6221 if (bp->hwrm_spec_code < 0x10601)
6222 return 0;
6223
6224 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6225 req.fid = cpu_to_le16(fid);
6226 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6227 if (!rc)
6228 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6229
6230 return rc;
6231 }
6232
6233 static bool bnxt_rfs_supported(struct bnxt *bp);
6234
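/* Build a HWRM_FUNC_CFG request reserving TX/RX rings, ring groups,
 * completion rings, stat contexts and VNICs for the PF. Only resources
 * with non-zero counts are enabled in the request. Used by both the
 * reserve and the resource-check paths.
 */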
6235 static void
6236 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
6237 int tx_rings, int rx_rings, int ring_grps,
6238 int cp_rings, int stats, int vnics)
6239 {
6240 u32 enables = 0;
6241
6242 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
6243 req->fid = cpu_to_le16(0xffff);
6244 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6245 req->num_tx_rings = cpu_to_le16(tx_rings);
6246 if (BNXT_NEW_RM(bp)) {
6247 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6248 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6249 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6250 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6251 enables |= tx_rings + ring_grps ?
6252 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6253 enables |= rx_rings ?
6254 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6255 } else {
6256 enables |= cp_rings ?
6257 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6258 enables |= ring_grps ?
6259 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6260 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6261 }
6262 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6263
6264 req->num_rx_rings = cpu_to_le16(rx_rings);
6265 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6266 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6267 req->num_msix = cpu_to_le16(cp_rings);
6268 req->num_rsscos_ctxs =
6269 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6270 } else {
6271 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6272 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6273 req->num_rsscos_ctxs = cpu_to_le16(1);
6274 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6275 bnxt_rfs_supported(bp))
6276 req->num_rsscos_ctxs =
6277 cpu_to_le16(ring_grps + 1);
6278 }
6279 req->num_stat_ctxs = cpu_to_le16(stats);
6280 req->num_vnics = cpu_to_le16(vnics);
6281 }
6282 req->enables = cpu_to_le32(enables);
6283 }
6284
6285 static void
6286 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
6287 struct hwrm_func_vf_cfg_input *req, int tx_rings,
6288 int rx_rings, int ring_grps, int cp_rings,
6289 int stats, int vnics)
6290 {
6291 u32 enables = 0;
6292
6293 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
6294 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6295 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6296 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6297 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6298 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6299 enables |= tx_rings + ring_grps ?
6300 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6301 } else {
6302 enables |= cp_rings ?
6303 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6304 enables |= ring_grps ?
6305 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6306 }
6307 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6308 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6309
6310 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6311 req->num_tx_rings = cpu_to_le16(tx_rings);
6312 req->num_rx_rings = cpu_to_le16(rx_rings);
6313 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6314 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6315 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6316 } else {
6317 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6318 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6319 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6320 }
6321 req->num_stat_ctxs = cpu_to_le16(stats);
6322 req->num_vnics = cpu_to_le16(vnics);
6323
6324 req->enables = cpu_to_le32(enables);
6325 }
6326
6327 static int
6328 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6329 int ring_grps, int cp_rings, int stats, int vnics)
6330 {
6331 struct hwrm_func_cfg_input req = {0};
6332 int rc;
6333
6334 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6335 cp_rings, stats, vnics);
6336 if (!req.enables)
6337 return 0;
6338
6339 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6340 if (rc)
6341 return rc;
6342
6343 if (bp->hwrm_spec_code < 0x10601)
6344 bp->hw_resc.resv_tx_rings = tx_rings;
6345
6346 return bnxt_hwrm_get_rings(bp);
6347 }
6348
6349 static int
6350 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6351 int ring_grps, int cp_rings, int stats, int vnics)
6352 {
6353 struct hwrm_func_vf_cfg_input req = {0};
6354 int rc;
6355
6356 if (!BNXT_NEW_RM(bp)) {
6357 bp->hw_resc.resv_tx_rings = tx_rings;
6358 return 0;
6359 }
6360
6361 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6362 cp_rings, stats, vnics);
6363 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6364 if (rc)
6365 return rc;
6366
6367 return bnxt_hwrm_get_rings(bp);
6368 }
6369
6370 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6371 int cp, int stat, int vnic)
6372 {
6373 if (BNXT_PF(bp))
6374 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6375 vnic);
6376 else
6377 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6378 vnic);
6379 }
6380
6381 int bnxt_nq_rings_in_use(struct bnxt *bp)
6382 {
6383 int cp = bp->cp_nr_rings;
6384 int ulp_msix, ulp_base;
6385
6386 ulp_msix = bnxt_get_ulp_msix_num(bp);
6387 if (ulp_msix) {
6388 ulp_base = bnxt_get_ulp_msix_base(bp);
6389 cp += ulp_msix;
6390 if ((ulp_base + ulp_msix) > cp)
6391 cp = ulp_base + ulp_msix;
6392 }
6393 return cp;
6394 }
6395
6396 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6397 {
6398 int cp;
6399
6400 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6401 return bnxt_nq_rings_in_use(bp);
6402
6403 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6404 return cp;
6405 }
6406
6407 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6408 {
6409 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6410 int cp = bp->cp_nr_rings;
6411
6412 if (!ulp_stat)
6413 return cp;
6414
6415 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6416 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6417
6418 return cp + ulp_stat;
6419 }
6420
6421 /* Check if a default RSS map needs to be set up. This function is only
6422 * used on older firmware that does not require reserving RX rings.
6423 */
6424 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6425 {
6426 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6427
6428 /* The RSS map is valid only if resv_rx_rings matches the current RX ring count */
6429 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6430 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6431 if (!netif_is_rxfh_configured(bp->dev))
6432 bnxt_set_dflt_rss_indir_tbl(bp);
6433 }
6434 }
6435
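/* Return true if the resources currently reserved in firmware no longer
 * match what the driver needs and a new reservation must be made.
 */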
6436 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6437 {
6438 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6439 int cp = bnxt_cp_rings_in_use(bp);
6440 int nq = bnxt_nq_rings_in_use(bp);
6441 int rx = bp->rx_nr_rings, stat;
6442 int vnic = 1, grp = rx;
6443
6444 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6445 bp->hwrm_spec_code >= 0x10601)
6446 return true;
6447
6448 /* Old firmware does not need RX ring reservations but we still
6449 * need to set up a default RSS map when needed. With new firmware
6450 * we go through RX ring reservations first and then set up the
6451 * RSS map for the successfully reserved RX rings when needed.
6452 */
6453 if (!BNXT_NEW_RM(bp)) {
6454 bnxt_check_rss_tbl_no_rmgr(bp);
6455 return false;
6456 }
6457 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6458 vnic = rx + 1;
6459 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6460 rx <<= 1;
6461 stat = bnxt_get_func_stat_ctxs(bp);
6462 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6463 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6464 (hw_resc->resv_hw_ring_grps != grp &&
6465 !(bp->flags & BNXT_FLAG_CHIP_P5)))
6466 return true;
6467 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6468 hw_resc->resv_irqs != nq)
6469 return true;
6470 return false;
6471 }
6472
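/* Reserve rings and related resources with firmware, then adjust the
 * driver's ring counts to whatever was actually granted, trimming RX,
 * TX and completion rings and resetting the RSS map only if necessary.
 */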
6473 static int __bnxt_reserve_rings(struct bnxt *bp)
6474 {
6475 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6476 int cp = bnxt_nq_rings_in_use(bp);
6477 int tx = bp->tx_nr_rings;
6478 int rx = bp->rx_nr_rings;
6479 int grp, rx_rings, rc;
6480 int vnic = 1, stat;
6481 bool sh = false;
6482
6483 if (!bnxt_need_reserve_rings(bp))
6484 return 0;
6485
6486 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6487 sh = true;
6488 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6489 vnic = rx + 1;
6490 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6491 rx <<= 1;
6492 grp = bp->rx_nr_rings;
6493 stat = bnxt_get_func_stat_ctxs(bp);
6494
6495 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6496 if (rc)
6497 return rc;
6498
6499 tx = hw_resc->resv_tx_rings;
6500 if (BNXT_NEW_RM(bp)) {
6501 rx = hw_resc->resv_rx_rings;
6502 cp = hw_resc->resv_irqs;
6503 grp = hw_resc->resv_hw_ring_grps;
6504 vnic = hw_resc->resv_vnics;
6505 stat = hw_resc->resv_stat_ctxs;
6506 }
6507
6508 rx_rings = rx;
6509 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6510 if (rx >= 2) {
6511 rx_rings = rx >> 1;
6512 } else {
6513 if (netif_running(bp->dev))
6514 return -ENOMEM;
6515
6516 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6517 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6518 bp->dev->hw_features &= ~NETIF_F_LRO;
6519 bp->dev->features &= ~NETIF_F_LRO;
6520 bnxt_set_ring_params(bp);
6521 }
6522 }
6523 rx_rings = min_t(int, rx_rings, grp);
6524 cp = min_t(int, cp, bp->cp_nr_rings);
6525 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6526 stat -= bnxt_get_ulp_stat_ctxs(bp);
6527 cp = min_t(int, cp, stat);
6528 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6529 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6530 rx = rx_rings << 1;
6531 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6532 bp->tx_nr_rings = tx;
6533
6534 /* If we cannot reserve all the RX rings, reset the RSS map only
6535 * if absolutely necessary
6536 */
6537 if (rx_rings != bp->rx_nr_rings) {
6538 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6539 rx_rings, bp->rx_nr_rings);
6540 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6541 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6542 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6543 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6544 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6545 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6546 }
6547 }
6548 bp->rx_nr_rings = rx_rings;
6549 bp->cp_nr_rings = cp;
6550
6551 if (!tx || !rx || !cp || !grp || !vnic || !stat)
6552 return -ENOMEM;
6553
6554 if (!netif_is_rxfh_configured(bp->dev))
6555 bnxt_set_dflt_rss_indir_tbl(bp);
6556
6557 return rc;
6558 }
6559
6560 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6561 int ring_grps, int cp_rings, int stats,
6562 int vnics)
6563 {
6564 struct hwrm_func_vf_cfg_input req = {0};
6565 u32 flags;
6566
6567 if (!BNXT_NEW_RM(bp))
6568 return 0;
6569
6570 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6571 cp_rings, stats, vnics);
6572 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6573 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6574 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6575 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6576 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6577 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6578 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6579 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6580
6581 req.flags = cpu_to_le32(flags);
6582 return hwrm_send_message_silent(bp, &req, sizeof(req),
6583 HWRM_CMD_TIMEOUT);
6584 }
6585
6586 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6587 int ring_grps, int cp_rings, int stats,
6588 int vnics)
6589 {
6590 struct hwrm_func_cfg_input req = {0};
6591 u32 flags;
6592
6593 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6594 cp_rings, stats, vnics);
6595 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6596 if (BNXT_NEW_RM(bp)) {
6597 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6598 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6599 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6600 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6601 if (bp->flags & BNXT_FLAG_CHIP_P5)
6602 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6603 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6604 else
6605 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6606 }
6607
6608 req.flags = cpu_to_le32(flags);
6609 return hwrm_send_message_silent(bp, &req, sizeof(req),
6610 HWRM_CMD_TIMEOUT);
6611 }
6612
6613 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6614 int ring_grps, int cp_rings, int stats,
6615 int vnics)
6616 {
6617 if (bp->hwrm_spec_code < 0x10801)
6618 return 0;
6619
6620 if (BNXT_PF(bp))
6621 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6622 ring_grps, cp_rings, stats,
6623 vnics);
6624
6625 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6626 cp_rings, stats, vnics);
6627 }
6628
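/* Query interrupt coalescing capabilities and timer units, falling back
 * to legacy defaults on firmware older than spec 1.9.2.
 */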
6629 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6630 {
6631 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6632 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6633 struct hwrm_ring_aggint_qcaps_input req = {0};
6634 int rc;
6635
6636 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6637 coal_cap->num_cmpl_dma_aggr_max = 63;
6638 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6639 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6640 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6641 coal_cap->int_lat_tmr_min_max = 65535;
6642 coal_cap->int_lat_tmr_max_max = 65535;
6643 coal_cap->num_cmpl_aggr_int_max = 65535;
6644 coal_cap->timer_units = 80;
6645
6646 if (bp->hwrm_spec_code < 0x10902)
6647 return;
6648
6649 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6650 mutex_lock(&bp->hwrm_cmd_lock);
6651 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6652 if (!rc) {
6653 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6654 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6655 coal_cap->num_cmpl_dma_aggr_max =
6656 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6657 coal_cap->num_cmpl_dma_aggr_during_int_max =
6658 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6659 coal_cap->cmpl_aggr_dma_tmr_max =
6660 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6661 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6662 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6663 coal_cap->int_lat_tmr_min_max =
6664 le16_to_cpu(resp->int_lat_tmr_min_max);
6665 coal_cap->int_lat_tmr_max_max =
6666 le16_to_cpu(resp->int_lat_tmr_max_max);
6667 coal_cap->num_cmpl_aggr_int_max =
6668 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6669 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6670 }
6671 mutex_unlock(&bp->hwrm_cmd_lock);
6672 }
6673
6674 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6675 {
6676 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6677
6678 return usec * 1000 / coal_cap->timer_units;
6679 }
6680
6681 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6682 struct bnxt_coal *hw_coal,
6683 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6684 {
6685 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6686 u32 cmpl_params = coal_cap->cmpl_params;
6687 u16 val, tmr, max, flags = 0;
6688
6689 max = hw_coal->bufs_per_record * 128;
6690 if (hw_coal->budget)
6691 max = hw_coal->bufs_per_record * hw_coal->budget;
6692 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6693
6694 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6695 req->num_cmpl_aggr_int = cpu_to_le16(val);
6696
6697 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6698 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6699
6700 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6701 coal_cap->num_cmpl_dma_aggr_during_int_max);
6702 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6703
6704 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6705 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6706 req->int_lat_tmr_max = cpu_to_le16(tmr);
6707
6708 /* min timer set to 1/2 of interrupt timer */
6709 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6710 val = tmr / 2;
6711 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6712 req->int_lat_tmr_min = cpu_to_le16(val);
6713 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6714 }
6715
6716 /* buf timer set to 1/4 of interrupt timer */
6717 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6718 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6719
6720 if (cmpl_params &
6721 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6722 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6723 val = clamp_t(u16, tmr, 1,
6724 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6725 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6726 req->enables |=
6727 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6728 }
6729
6730 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6731 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6732 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6733 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6734 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6735 req->flags = cpu_to_le16(flags);
6736 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6737 }
6738
6739 /* Caller holds bp->hwrm_cmd_lock */
6740 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6741 struct bnxt_coal *hw_coal)
6742 {
6743 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6744 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6745 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6746 u32 nq_params = coal_cap->nq_params;
6747 u16 tmr;
6748
6749 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6750 return 0;
6751
6752 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6753 -1, -1);
6754 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6755 req.flags =
6756 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6757
6758 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6759 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6760 req.int_lat_tmr_min = cpu_to_le16(tmr);
6761 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6762 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6763 }
6764
6765 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6766 {
6767 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6768 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6769 struct bnxt_coal coal;
6770
6771 /* Tick values are in microseconds.
6772 * 1 coal_buf x bufs_per_record = 1 completion record.
6773 */
6774 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6775
6776 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6777 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6778
6779 if (!bnapi->rx_ring)
6780 return -ENODEV;
6781
6782 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6783 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6784
6785 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6786
6787 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6788
6789 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6790 HWRM_CMD_TIMEOUT);
6791 }
6792
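/* Program RX and TX coalescing on every completion ring. On P5 chips the
 * TX completion ring of a combined RX/TX NAPI and the NQ timer (when
 * supported) are also programmed.
 */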
6793 int bnxt_hwrm_set_coal(struct bnxt *bp)
6794 {
6795 int i, rc = 0;
6796 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6797 req_tx = {0}, *req;
6798
6799 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6800 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6801 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6802 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6803
6804 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6805 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6806
6807 mutex_lock(&bp->hwrm_cmd_lock);
6808 for (i = 0; i < bp->cp_nr_rings; i++) {
6809 struct bnxt_napi *bnapi = bp->bnapi[i];
6810 struct bnxt_coal *hw_coal;
6811 u16 ring_id;
6812
6813 req = &req_rx;
6814 if (!bnapi->rx_ring) {
6815 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6816 req = &req_tx;
6817 } else {
6818 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6819 }
6820 req->ring_id = cpu_to_le16(ring_id);
6821
6822 rc = _hwrm_send_message(bp, req, sizeof(*req),
6823 HWRM_CMD_TIMEOUT);
6824 if (rc)
6825 break;
6826
6827 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6828 continue;
6829
6830 if (bnapi->rx_ring && bnapi->tx_ring) {
6831 req = &req_tx;
6832 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6833 req->ring_id = cpu_to_le16(ring_id);
6834 rc = _hwrm_send_message(bp, req, sizeof(*req),
6835 HWRM_CMD_TIMEOUT);
6836 if (rc)
6837 break;
6838 }
6839 if (bnapi->rx_ring)
6840 hw_coal = &bp->rx_coal;
6841 else
6842 hw_coal = &bp->tx_coal;
6843 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6844 }
6845 mutex_unlock(&bp->hwrm_cmd_lock);
6846 return rc;
6847 }
6848
6849 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6850 {
6851 struct hwrm_stat_ctx_clr_stats_input req0 = {0};
6852 struct hwrm_stat_ctx_free_input req = {0};
6853 int i;
6854
6855 if (!bp->bnapi)
6856 return;
6857
6858 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6859 return;
6860
6861 bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
6862 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6863
6864 mutex_lock(&bp->hwrm_cmd_lock);
6865 for (i = 0; i < bp->cp_nr_rings; i++) {
6866 struct bnxt_napi *bnapi = bp->bnapi[i];
6867 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6868
6869 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6870 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6871 if (BNXT_FW_MAJ(bp) <= 20) {
6872 req0.stat_ctx_id = req.stat_ctx_id;
6873 _hwrm_send_message(bp, &req0, sizeof(req0),
6874 HWRM_CMD_TIMEOUT);
6875 }
6876 _hwrm_send_message(bp, &req, sizeof(req),
6877 HWRM_CMD_TIMEOUT);
6878
6879 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6880 }
6881 }
6882 mutex_unlock(&bp->hwrm_cmd_lock);
6883 }
6884
6885 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6886 {
6887 int rc = 0, i;
6888 struct hwrm_stat_ctx_alloc_input req = {0};
6889 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6890
6891 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6892 return 0;
6893
6894 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6895
6896 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6897 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6898
6899 mutex_lock(&bp->hwrm_cmd_lock);
6900 for (i = 0; i < bp->cp_nr_rings; i++) {
6901 struct bnxt_napi *bnapi = bp->bnapi[i];
6902 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6903
6904 req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6905
6906 rc = _hwrm_send_message(bp, &req, sizeof(req),
6907 HWRM_CMD_TIMEOUT);
6908 if (rc)
6909 break;
6910
6911 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6912
6913 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6914 }
6915 mutex_unlock(&bp->hwrm_cmd_lock);
6916 return rc;
6917 }
6918
6919 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6920 {
6921 struct hwrm_func_qcfg_input req = {0};
6922 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6923 u32 min_db_offset = 0;
6924 u16 flags;
6925 int rc;
6926
6927 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6928 req.fid = cpu_to_le16(0xffff);
6929 mutex_lock(&bp->hwrm_cmd_lock);
6930 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6931 if (rc)
6932 goto func_qcfg_exit;
6933
6934 #ifdef CONFIG_BNXT_SRIOV
6935 if (BNXT_VF(bp)) {
6936 struct bnxt_vf_info *vf = &bp->vf;
6937
6938 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6939 } else {
6940 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6941 }
6942 #endif
6943 flags = le16_to_cpu(resp->flags);
6944 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6945 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6946 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6947 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6948 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6949 }
6950 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6951 bp->flags |= BNXT_FLAG_MULTI_HOST;
6952 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6953 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6954
6955 switch (resp->port_partition_type) {
6956 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6957 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6958 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6959 bp->port_partition_type = resp->port_partition_type;
6960 break;
6961 }
6962 if (bp->hwrm_spec_code < 0x10707 ||
6963 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6964 bp->br_mode = BRIDGE_MODE_VEB;
6965 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6966 bp->br_mode = BRIDGE_MODE_VEPA;
6967 else
6968 bp->br_mode = BRIDGE_MODE_UNDEF;
6969
6970 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6971 if (!bp->max_mtu)
6972 bp->max_mtu = BNXT_MAX_MTU;
6973
6974 if (bp->db_size)
6975 goto func_qcfg_exit;
6976
6977 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6978 if (BNXT_PF(bp))
6979 min_db_offset = DB_PF_OFFSET_P5;
6980 else
6981 min_db_offset = DB_VF_OFFSET_P5;
6982 }
6983 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6984 1024);
6985 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6986 bp->db_size <= min_db_offset)
6987 bp->db_size = pci_resource_len(bp->pdev, 2);
6988
6989 func_qcfg_exit:
6990 mutex_unlock(&bp->hwrm_cmd_lock);
6991 return rc;
6992 }
6993
6994 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6995 struct hwrm_func_backing_store_qcaps_output *resp)
6996 {
6997 struct bnxt_mem_init *mem_init;
6998 u16 init_mask;
6999 u8 init_val;
7000 u8 *offset;
7001 int i;
7002
7003 init_val = resp->ctx_kind_initializer;
7004 init_mask = le16_to_cpu(resp->ctx_init_mask);
7005 offset = &resp->qp_init_offset;
7006 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7007 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
7008 mem_init->init_val = init_val;
7009 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
7010 if (!init_mask)
7011 continue;
7012 if (i == BNXT_CTX_MEM_INIT_STAT)
7013 offset = &resp->stat_init_offset;
7014 if (init_mask & (1 << i))
7015 mem_init->offset = *offset * 4;
7016 else
7017 mem_init->init_val = 0;
7018 }
7019 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
7020 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
7021 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
7022 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
7023 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
7024 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
7025 }
7026
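/* Query the sizes and limits of the context memory (backing store) that
 * the driver must allocate on behalf of the firmware, and save them in
 * bp->ctx.
 */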
7027 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
7028 {
7029 struct hwrm_func_backing_store_qcaps_input req = {0};
7030 struct hwrm_func_backing_store_qcaps_output *resp =
7031 bp->hwrm_cmd_resp_addr;
7032 int rc;
7033
7034 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
7035 return 0;
7036
7037 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
7038 mutex_lock(&bp->hwrm_cmd_lock);
7039 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7040 if (!rc) {
7041 struct bnxt_ctx_pg_info *ctx_pg;
7042 struct bnxt_ctx_mem_info *ctx;
7043 int i, tqm_rings;
7044
7045 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
7046 if (!ctx) {
7047 rc = -ENOMEM;
7048 goto ctx_err;
7049 }
7050 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
7051 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
7052 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
7053 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
7054 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
7055 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
7056 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
7057 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
7058 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
7059 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
7060 ctx->vnic_max_vnic_entries =
7061 le16_to_cpu(resp->vnic_max_vnic_entries);
7062 ctx->vnic_max_ring_table_entries =
7063 le16_to_cpu(resp->vnic_max_ring_table_entries);
7064 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
7065 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
7066 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
7067 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
7068 ctx->tqm_min_entries_per_ring =
7069 le32_to_cpu(resp->tqm_min_entries_per_ring);
7070 ctx->tqm_max_entries_per_ring =
7071 le32_to_cpu(resp->tqm_max_entries_per_ring);
7072 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
7073 if (!ctx->tqm_entries_multiple)
7074 ctx->tqm_entries_multiple = 1;
7075 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
7076 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
7077 ctx->mrav_num_entries_units =
7078 le16_to_cpu(resp->mrav_num_entries_units);
7079 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
7080 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
7081
7082 bnxt_init_ctx_initializer(ctx, resp);
7083
7084 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
7085 if (!ctx->tqm_fp_rings_count)
7086 ctx->tqm_fp_rings_count = bp->max_q;
7087 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
7088 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
7089
7090 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
7091 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
7092 if (!ctx_pg) {
7093 kfree(ctx);
7094 rc = -ENOMEM;
7095 goto ctx_err;
7096 }
7097 for (i = 0; i < tqm_rings; i++, ctx_pg++)
7098 ctx->tqm_mem[i] = ctx_pg;
7099 bp->ctx = ctx;
7100 } else {
7101 rc = 0;
7102 }
7103 ctx_err:
7104 mutex_unlock(&bp->hwrm_cmd_lock);
7105 return rc;
7106 }
7107
7108 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
7109 __le64 *pg_dir)
7110 {
7111 if (!rmem->nr_pages)
7112 return;
7113
7114 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
7115 if (rmem->depth >= 1) {
7116 if (rmem->depth == 2)
7117 *pg_attr |= 2;
7118 else
7119 *pg_attr |= 1;
7120 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
7121 } else {
7122 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
7123 }
7124 }
7125
7126 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
7127 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
7128 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
7129 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
7130 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
7131 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7132
7133 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7134 {
7135 struct hwrm_func_backing_store_cfg_input req = {0};
7136 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7137 struct bnxt_ctx_pg_info *ctx_pg;
7138 u32 req_len = sizeof(req);
7139 __le32 *num_entries;
7140 __le64 *pg_dir;
7141 u32 flags = 0;
7142 u8 *pg_attr;
7143 u32 ena;
7144 int i;
7145
7146 if (!ctx)
7147 return 0;
7148
7149 if (req_len > bp->hwrm_max_ext_req_len)
7150 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
7151 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
7152 req.enables = cpu_to_le32(enables);
7153
7154 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7155 ctx_pg = &ctx->qp_mem;
7156 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
7157 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7158 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7159 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7160 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7161 &req.qpc_pg_size_qpc_lvl,
7162 &req.qpc_page_dir);
7163 }
7164 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7165 ctx_pg = &ctx->srq_mem;
7166 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
7167 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7168 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7169 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7170 &req.srq_pg_size_srq_lvl,
7171 &req.srq_page_dir);
7172 }
7173 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7174 ctx_pg = &ctx->cq_mem;
7175 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
7176 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7177 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7178 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
7179 &req.cq_page_dir);
7180 }
7181 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7182 ctx_pg = &ctx->vnic_mem;
7183 req.vnic_num_vnic_entries =
7184 cpu_to_le16(ctx->vnic_max_vnic_entries);
7185 req.vnic_num_ring_table_entries =
7186 cpu_to_le16(ctx->vnic_max_ring_table_entries);
7187 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7188 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7189 &req.vnic_pg_size_vnic_lvl,
7190 &req.vnic_page_dir);
7191 }
7192 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7193 ctx_pg = &ctx->stat_mem;
7194 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7195 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7196 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7197 &req.stat_pg_size_stat_lvl,
7198 &req.stat_page_dir);
7199 }
7200 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7201 ctx_pg = &ctx->mrav_mem;
7202 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7203 if (ctx->mrav_num_entries_units)
7204 flags |=
7205 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7206 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7207 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7208 &req.mrav_pg_size_mrav_lvl,
7209 &req.mrav_page_dir);
7210 }
7211 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7212 ctx_pg = &ctx->tim_mem;
7213 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
7214 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7215 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7216 &req.tim_pg_size_tim_lvl,
7217 &req.tim_page_dir);
7218 }
7219 for (i = 0, num_entries = &req.tqm_sp_num_entries,
7220 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
7221 pg_dir = &req.tqm_sp_page_dir,
7222 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7223 i < BNXT_MAX_TQM_RINGS;
7224 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7225 if (!(enables & ena))
7226 continue;
7227
7228 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7229 ctx_pg = ctx->tqm_mem[i];
7230 *num_entries = cpu_to_le32(ctx_pg->entries);
7231 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7232 }
7233 req.flags = cpu_to_le32(flags);
7234 return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
7235 }
7236
7237 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7238 struct bnxt_ctx_pg_info *ctx_pg)
7239 {
7240 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7241
7242 rmem->page_size = BNXT_PAGE_SIZE;
7243 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7244 rmem->dma_arr = ctx_pg->ctx_dma_arr;
7245 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7246 if (rmem->depth >= 1)
7247 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7248 return bnxt_alloc_ring(bp, rmem);
7249 }
7250
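/* Allocate the pages backing one context memory region, switching to a
 * two-level page directory when the region needs more than
 * MAX_CTX_PAGES pages or a deeper layout is requested.
 */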
7251 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7252 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7253 u8 depth, struct bnxt_mem_init *mem_init)
7254 {
7255 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7256 int rc;
7257
7258 if (!mem_size)
7259 return -EINVAL;
7260
7261 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7262 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7263 ctx_pg->nr_pages = 0;
7264 return -EINVAL;
7265 }
7266 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7267 int nr_tbls, i;
7268
7269 rmem->depth = 2;
7270 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7271 GFP_KERNEL);
7272 if (!ctx_pg->ctx_pg_tbl)
7273 return -ENOMEM;
7274 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7275 rmem->nr_pages = nr_tbls;
7276 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7277 if (rc)
7278 return rc;
7279 for (i = 0; i < nr_tbls; i++) {
7280 struct bnxt_ctx_pg_info *pg_tbl;
7281
7282 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7283 if (!pg_tbl)
7284 return -ENOMEM;
7285 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7286 rmem = &pg_tbl->ring_mem;
7287 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7288 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7289 rmem->depth = 1;
7290 rmem->nr_pages = MAX_CTX_PAGES;
7291 rmem->mem_init = mem_init;
7292 if (i == (nr_tbls - 1)) {
7293 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7294
7295 if (rem)
7296 rmem->nr_pages = rem;
7297 }
7298 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7299 if (rc)
7300 break;
7301 }
7302 } else {
7303 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7304 if (rmem->nr_pages > 1 || depth)
7305 rmem->depth = 1;
7306 rmem->mem_init = mem_init;
7307 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7308 }
7309 return rc;
7310 }
7311
7312 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7313 struct bnxt_ctx_pg_info *ctx_pg)
7314 {
7315 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7316
7317 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7318 ctx_pg->ctx_pg_tbl) {
7319 int i, nr_tbls = rmem->nr_pages;
7320
7321 for (i = 0; i < nr_tbls; i++) {
7322 struct bnxt_ctx_pg_info *pg_tbl;
7323 struct bnxt_ring_mem_info *rmem2;
7324
7325 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7326 if (!pg_tbl)
7327 continue;
7328 rmem2 = &pg_tbl->ring_mem;
7329 bnxt_free_ring(bp, rmem2);
7330 ctx_pg->ctx_pg_arr[i] = NULL;
7331 kfree(pg_tbl);
7332 ctx_pg->ctx_pg_tbl[i] = NULL;
7333 }
7334 kfree(ctx_pg->ctx_pg_tbl);
7335 ctx_pg->ctx_pg_tbl = NULL;
7336 }
7337 bnxt_free_ring(bp, rmem);
7338 ctx_pg->nr_pages = 0;
7339 }
7340
7341 static void bnxt_free_ctx_mem(struct bnxt *bp)
7342 {
7343 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7344 int i;
7345
7346 if (!ctx)
7347 return;
7348
7349 if (ctx->tqm_mem[0]) {
7350 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7351 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7352 kfree(ctx->tqm_mem[0]);
7353 ctx->tqm_mem[0] = NULL;
7354 }
7355
7356 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7357 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7358 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7359 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7360 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7361 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7362 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7363 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7364 }
7365
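/* Size and allocate all firmware backing store regions (QP, SRQ, CQ,
 * VNIC, stats, MRAV, TIM and TQM rings), adding extra entries for RoCE
 * when it is supported and not running in a kdump kernel.
 */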
7366 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7367 {
7368 struct bnxt_ctx_pg_info *ctx_pg;
7369 struct bnxt_ctx_mem_info *ctx;
7370 struct bnxt_mem_init *init;
7371 u32 mem_size, ena, entries;
7372 u32 entries_sp, min;
7373 u32 num_mr, num_ah;
7374 u32 extra_srqs = 0;
7375 u32 extra_qps = 0;
7376 u8 pg_lvl = 1;
7377 int i, rc;
7378
7379 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7380 if (rc) {
7381 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7382 rc);
7383 return rc;
7384 }
7385 ctx = bp->ctx;
7386 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7387 return 0;
7388
7389 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7390 pg_lvl = 2;
7391 extra_qps = 65536;
7392 extra_srqs = 8192;
7393 }
7394
7395 ctx_pg = &ctx->qp_mem;
7396 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7397 extra_qps;
7398 if (ctx->qp_entry_size) {
7399 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7400 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7401 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7402 if (rc)
7403 return rc;
7404 }
7405
7406 ctx_pg = &ctx->srq_mem;
7407 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7408 if (ctx->srq_entry_size) {
7409 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7410 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7411 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7412 if (rc)
7413 return rc;
7414 }
7415
7416 ctx_pg = &ctx->cq_mem;
7417 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7418 if (ctx->cq_entry_size) {
7419 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7420 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7421 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7422 if (rc)
7423 return rc;
7424 }
7425
7426 ctx_pg = &ctx->vnic_mem;
7427 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7428 ctx->vnic_max_ring_table_entries;
7429 if (ctx->vnic_entry_size) {
7430 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7431 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7432 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7433 if (rc)
7434 return rc;
7435 }
7436
7437 ctx_pg = &ctx->stat_mem;
7438 ctx_pg->entries = ctx->stat_max_entries;
7439 if (ctx->stat_entry_size) {
7440 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7441 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7442 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7443 if (rc)
7444 return rc;
7445 }
7446
7447 ena = 0;
7448 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7449 goto skip_rdma;
7450
7451 ctx_pg = &ctx->mrav_mem;
7452 /* 128K extra is needed to accommodate static AH context
7453 * allocation by f/w.
7454 */
7455 num_mr = 1024 * 256;
7456 num_ah = 1024 * 128;
7457 ctx_pg->entries = num_mr + num_ah;
7458 if (ctx->mrav_entry_size) {
7459 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7460 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7461 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7462 if (rc)
7463 return rc;
7464 }
7465 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7466 if (ctx->mrav_num_entries_units)
7467 ctx_pg->entries =
7468 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7469 (num_ah / ctx->mrav_num_entries_units);
7470
7471 ctx_pg = &ctx->tim_mem;
7472 ctx_pg->entries = ctx->qp_mem.entries;
7473 if (ctx->tim_entry_size) {
7474 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7475 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7476 if (rc)
7477 return rc;
7478 }
7479 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7480
7481 skip_rdma:
7482 min = ctx->tqm_min_entries_per_ring;
7483 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7484 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7485 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7486 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7487 entries = roundup(entries, ctx->tqm_entries_multiple);
7488 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7489 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7490 ctx_pg = ctx->tqm_mem[i];
7491 ctx_pg->entries = i ? entries : entries_sp;
7492 if (ctx->tqm_entry_size) {
7493 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7494 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7495 NULL);
7496 if (rc)
7497 return rc;
7498 }
7499 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7500 }
7501 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7502 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7503 if (rc) {
7504 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7505 rc);
7506 return rc;
7507 }
7508 ctx->flags |= BNXT_CTX_FLAG_INITED;
7509 return 0;
7510 }
7511
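/* Read the min/max resource ranges (rings, VNICs, stat contexts, etc.)
 * reserved by firmware for this function.  When @all is false, only
 * max_tx_sch_inputs is refreshed.
 */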
7512 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7513 {
7514 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7515 struct hwrm_func_resource_qcaps_input req = {0};
7516 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7517 int rc;
7518
7519 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
7520 req.fid = cpu_to_le16(0xffff);
7521
7522 mutex_lock(&bp->hwrm_cmd_lock);
7523 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
7524 HWRM_CMD_TIMEOUT);
7525 if (rc)
7526 goto hwrm_func_resc_qcaps_exit;
7527
7528 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7529 if (!all)
7530 goto hwrm_func_resc_qcaps_exit;
7531
7532 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7533 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7534 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7535 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7536 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7537 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7538 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7539 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7540 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7541 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7542 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7543 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7544 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7545 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7546 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7547 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7548
7549 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7550 u16 max_msix = le16_to_cpu(resp->max_msix);
7551
7552 hw_resc->max_nqs = max_msix;
7553 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7554 }
7555
7556 if (BNXT_PF(bp)) {
7557 struct bnxt_pf_info *pf = &bp->pf;
7558
7559 pf->vf_resv_strategy =
7560 le16_to_cpu(resp->vf_reservation_strategy);
7561 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7562 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7563 }
7564 hwrm_func_resc_qcaps_exit:
7565 mutex_unlock(&bp->hwrm_cmd_lock);
7566 return rc;
7567 }
7568
7569 /* bp->hwrm_cmd_lock already held. */
7570 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7571 {
7572 struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7573 struct hwrm_port_mac_ptp_qcfg_input req = {0};
7574 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7575 u8 flags;
7576 int rc;
7577
7578 if (bp->hwrm_spec_code < 0x10801) {
7579 rc = -ENODEV;
7580 goto no_ptp;
7581 }
7582
7583 req.port_id = cpu_to_le16(bp->pf.port_id);
7584 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1);
7585 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7586 if (rc)
7587 goto no_ptp;
7588
7589 flags = resp->flags;
7590 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7591 rc = -ENODEV;
7592 goto no_ptp;
7593 }
7594 if (!ptp) {
7595 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7596 if (!ptp)
7597 return -ENOMEM;
7598 ptp->bp = bp;
7599 bp->ptp_cfg = ptp;
7600 }
7601 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7602 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7603 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7604 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7605 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7606 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7607 } else {
7608 rc = -ENODEV;
7609 goto no_ptp;
7610 }
7611 rc = bnxt_ptp_init(bp);
7612 if (!rc)
7613 return 0;
7614
7615 netdev_warn(bp->dev, "PTP initialization failed.\n");
7616
7617 no_ptp:
7618 bnxt_ptp_clear(bp);
7619 kfree(ptp);
7620 bp->ptp_cfg = NULL;
7621 return rc;
7622 }
7623
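/* Issue HWRM_FUNC_QCAPS and cache the capability flags and absolute
 * resource maximums.  For the PF, also record the port id, MAC
 * address, VF ranges, WoL and PTP support.
 */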
7624 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7625 {
7626 int rc = 0;
7627 struct hwrm_func_qcaps_input req = {0};
7628 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7629 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7630 u32 flags, flags_ext;
7631
7632 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
7633 req.fid = cpu_to_le16(0xffff);
7634
7635 mutex_lock(&bp->hwrm_cmd_lock);
7636 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7637 if (rc)
7638 goto hwrm_func_qcaps_exit;
7639
7640 flags = le32_to_cpu(resp->flags);
7641 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7642 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7643 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7644 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7645 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7646 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7647 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7648 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7649 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7650 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7651 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7652 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7653 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7654 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7655 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7656 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7657
7658 flags_ext = le32_to_cpu(resp->flags_ext);
7659 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7660 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7661 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7662 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7663
7664 bp->tx_push_thresh = 0;
7665 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7666 BNXT_FW_MAJ(bp) > 217)
7667 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7668
7669 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7670 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7671 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7672 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7673 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7674 if (!hw_resc->max_hw_ring_grps)
7675 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7676 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7677 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7678 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7679
7680 if (BNXT_PF(bp)) {
7681 struct bnxt_pf_info *pf = &bp->pf;
7682
7683 pf->fw_fid = le16_to_cpu(resp->fid);
7684 pf->port_id = le16_to_cpu(resp->port_id);
7685 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7686 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7687 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7688 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7689 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7690 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7691 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7692 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7693 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7694 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7695 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7696 bp->flags |= BNXT_FLAG_WOL_CAP;
7697 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7698 __bnxt_hwrm_ptp_qcfg(bp);
7699 } else {
7700 bnxt_ptp_clear(bp);
7701 kfree(bp->ptp_cfg);
7702 bp->ptp_cfg = NULL;
7703 }
7704 } else {
7705 #ifdef CONFIG_BNXT_SRIOV
7706 struct bnxt_vf_info *vf = &bp->vf;
7707
7708 vf->fw_fid = le16_to_cpu(resp->fid);
7709 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7710 #endif
7711 }
7712
7713 hwrm_func_qcaps_exit:
7714 mutex_unlock(&bp->hwrm_cmd_lock);
7715 return rc;
7716 }
7717
7718 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7719
7720 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7721 {
7722 int rc;
7723
7724 rc = __bnxt_hwrm_func_qcaps(bp);
7725 if (rc)
7726 return rc;
7727 rc = bnxt_hwrm_queue_qportcfg(bp);
7728 if (rc) {
7729 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7730 return rc;
7731 }
7732 if (bp->hwrm_spec_code >= 0x10803) {
7733 rc = bnxt_alloc_ctx_mem(bp);
7734 if (rc)
7735 return rc;
7736 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7737 if (!rc)
7738 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7739 }
7740 return 0;
7741 }
7742
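/* Query advanced CFA flow management capabilities; only the RFS ring
 * table index v2 capability is consumed here.
 */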
7743 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7744 {
7745 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7746 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7747 int rc = 0;
7748 u32 flags;
7749
7750 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7751 return 0;
7752
7753 resp = bp->hwrm_cmd_resp_addr;
7754 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7755
7756 mutex_lock(&bp->hwrm_cmd_lock);
7757 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7758 if (rc)
7759 goto hwrm_cfa_adv_qcaps_exit;
7760
7761 flags = le32_to_cpu(resp->flags);
7762 if (flags &
7763 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7764 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7765
7766 hwrm_cfa_adv_qcaps_exit:
7767 mutex_unlock(&bp->hwrm_cmd_lock);
7768 return rc;
7769 }
7770
7771 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7772 {
7773 if (bp->fw_health)
7774 return 0;
7775
7776 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7777 if (!bp->fw_health)
7778 return -ENOMEM;
7779
7780 return 0;
7781 }
7782
7783 static int bnxt_alloc_fw_health(struct bnxt *bp)
7784 {
7785 int rc;
7786
7787 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7788 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7789 return 0;
7790
7791 rc = __bnxt_alloc_fw_health(bp);
7792 if (rc) {
7793 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7794 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7795 return rc;
7796 }
7797
7798 return 0;
7799 }
7800
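/* Point the GRC window reserved for firmware health registers at the
 * window-aligned base of @reg.
 */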
7801 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7802 {
7803 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7804 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7805 BNXT_FW_HEALTH_WIN_MAP_OFF);
7806 }
7807
7808 bool bnxt_is_fw_healthy(struct bnxt *bp)
7809 {
7810 if (bp->fw_health && bp->fw_health->status_reliable) {
7811 u32 fw_status;
7812
7813 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7814 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7815 return false;
7816 }
7817
7818 return true;
7819 }
7820
7821 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7822 {
7823 struct bnxt_fw_health *fw_health = bp->fw_health;
7824 u32 reg_type;
7825
7826 if (!fw_health || !fw_health->status_reliable)
7827 return;
7828
7829 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7830 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7831 fw_health->status_reliable = false;
7832 }
7833
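/* Locate the firmware health status register, preferring the hcomm
 * status structure and falling back to the fixed GRC location on P5
 * chips, then map it through the health register window.
 */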
7834 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7835 {
7836 void __iomem *hs;
7837 u32 status_loc;
7838 u32 reg_type;
7839 u32 sig;
7840
7841 if (bp->fw_health)
7842 bp->fw_health->status_reliable = false;
7843
7844 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7845 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7846
7847 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7848 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7849 if (!bp->chip_num) {
7850 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7851 bp->chip_num = readl(bp->bar0 +
7852 BNXT_FW_HEALTH_WIN_BASE +
7853 BNXT_GRC_REG_CHIP_NUM);
7854 }
7855 if (!BNXT_CHIP_P5(bp))
7856 return;
7857
7858 status_loc = BNXT_GRC_REG_STATUS_P5 |
7859 BNXT_FW_HEALTH_REG_TYPE_BAR0;
7860 } else {
7861 status_loc = readl(hs + offsetof(struct hcomm_status,
7862 fw_status_loc));
7863 }
7864
7865 if (__bnxt_alloc_fw_health(bp)) {
7866 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7867 return;
7868 }
7869
7870 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7871 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7872 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7873 __bnxt_map_fw_health_reg(bp, status_loc);
7874 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7875 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7876 }
7877
7878 bp->fw_health->status_reliable = true;
7879 }
7880
7881 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7882 {
7883 struct bnxt_fw_health *fw_health = bp->fw_health;
7884 u32 reg_base = 0xffffffff;
7885 int i;
7886
7887 bp->fw_health->status_reliable = false;
7888 /* Only pre-map the monitoring GRC registers using window 3 */
7889 for (i = 0; i < 4; i++) {
7890 u32 reg = fw_health->regs[i];
7891
7892 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7893 continue;
7894 if (reg_base == 0xffffffff)
7895 reg_base = reg & BNXT_GRC_BASE_MASK;
7896 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7897 return -ERANGE;
7898 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7899 }
7900 bp->fw_health->status_reliable = true;
7901 if (reg_base == 0xffffffff)
7902 return 0;
7903
7904 __bnxt_map_fw_health_reg(bp, reg_base);
7905 return 0;
7906 }
7907
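/* Retrieve the error recovery parameters from firmware: polling and
 * wait intervals, the health/heartbeat/reset registers, and the reset
 * sequence to replay during recovery.
 */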
7908 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7909 {
7910 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7911 struct bnxt_fw_health *fw_health = bp->fw_health;
7912 struct hwrm_error_recovery_qcfg_input req = {0};
7913 int rc, i;
7914
7915 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7916 return 0;
7917
7918 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7919 mutex_lock(&bp->hwrm_cmd_lock);
7920 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7921 if (rc)
7922 goto err_recovery_out;
7923 fw_health->flags = le32_to_cpu(resp->flags);
7924 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7925 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7926 rc = -EINVAL;
7927 goto err_recovery_out;
7928 }
7929 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7930 fw_health->master_func_wait_dsecs =
7931 le32_to_cpu(resp->master_func_wait_period);
7932 fw_health->normal_func_wait_dsecs =
7933 le32_to_cpu(resp->normal_func_wait_period);
7934 fw_health->post_reset_wait_dsecs =
7935 le32_to_cpu(resp->master_func_wait_period_after_reset);
7936 fw_health->post_reset_max_wait_dsecs =
7937 le32_to_cpu(resp->max_bailout_time_after_reset);
7938 fw_health->regs[BNXT_FW_HEALTH_REG] =
7939 le32_to_cpu(resp->fw_health_status_reg);
7940 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7941 le32_to_cpu(resp->fw_heartbeat_reg);
7942 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7943 le32_to_cpu(resp->fw_reset_cnt_reg);
7944 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7945 le32_to_cpu(resp->reset_inprogress_reg);
7946 fw_health->fw_reset_inprog_reg_mask =
7947 le32_to_cpu(resp->reset_inprogress_reg_mask);
7948 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7949 if (fw_health->fw_reset_seq_cnt >= 16) {
7950 rc = -EINVAL;
7951 goto err_recovery_out;
7952 }
7953 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7954 fw_health->fw_reset_seq_regs[i] =
7955 le32_to_cpu(resp->reset_reg[i]);
7956 fw_health->fw_reset_seq_vals[i] =
7957 le32_to_cpu(resp->reset_reg_val[i]);
7958 fw_health->fw_reset_seq_delay_msec[i] =
7959 resp->delay_after_reset[i];
7960 }
7961 err_recovery_out:
7962 mutex_unlock(&bp->hwrm_cmd_lock);
7963 if (!rc)
7964 rc = bnxt_map_fw_health_regs(bp);
7965 if (rc)
7966 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7967 return rc;
7968 }
7969
7970 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7971 {
7972 struct hwrm_func_reset_input req = {0};
7973
7974 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7975 req.enables = 0;
7976
7977 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7978 }
7979
7980 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7981 {
7982 struct hwrm_nvm_get_dev_info_output nvm_info;
7983
7984 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7985 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7986 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7987 nvm_info.nvm_cfg_ver_upd);
7988 }
7989
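/* Query the CoS queue configuration for the port and build the queue
 * id and traffic class mappings used for ring and DCB setup.
 */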
7990 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7991 {
7992 int rc = 0;
7993 struct hwrm_queue_qportcfg_input req = {0};
7994 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7995 u8 i, j, *qptr;
7996 bool no_rdma;
7997
7998 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7999
8000 mutex_lock(&bp->hwrm_cmd_lock);
8001 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8002 if (rc)
8003 goto qportcfg_exit;
8004
8005 if (!resp->max_configurable_queues) {
8006 rc = -EINVAL;
8007 goto qportcfg_exit;
8008 }
8009 bp->max_tc = resp->max_configurable_queues;
8010 bp->max_lltc = resp->max_configurable_lossless_queues;
8011 if (bp->max_tc > BNXT_MAX_QUEUE)
8012 bp->max_tc = BNXT_MAX_QUEUE;
8013
8014 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
8015 qptr = &resp->queue_id0;
8016 for (i = 0, j = 0; i < bp->max_tc; i++) {
8017 bp->q_info[j].queue_id = *qptr;
8018 bp->q_ids[i] = *qptr++;
8019 bp->q_info[j].queue_profile = *qptr++;
8020 bp->tc_to_qidx[j] = j;
8021 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
8022 (no_rdma && BNXT_PF(bp)))
8023 j++;
8024 }
8025 bp->max_q = bp->max_tc;
8026 bp->max_tc = max_t(u8, j, 1);
8027
8028 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
8029 bp->max_tc = 1;
8030
8031 if (bp->max_lltc > bp->max_tc)
8032 bp->max_lltc = bp->max_tc;
8033
8034 qportcfg_exit:
8035 mutex_unlock(&bp->hwrm_cmd_lock);
8036 return rc;
8037 }
8038
8039 static int bnxt_hwrm_poll(struct bnxt *bp)
8040 {
8041 struct hwrm_ver_get_input req = {0};
8042 int rc;
8043
8044 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
8045 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
8046 req.hwrm_intf_min = HWRM_VERSION_MINOR;
8047 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
8048
8049 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8050 return rc;
8051 }
8052
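/* Full HWRM_VER_GET handshake: record the interface spec, firmware
 * version strings, command timeout, maximum request lengths, chip
 * identity and device capability flags.
 */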
8053 static int bnxt_hwrm_ver_get(struct bnxt *bp)
8054 {
8055 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
8056 struct hwrm_ver_get_input req = {0};
8057 u16 fw_maj, fw_min, fw_bld, fw_rsv;
8058 u32 dev_caps_cfg, hwrm_ver;
8059 int rc, len;
8060
8061 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
8062 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
8063 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
8064 req.hwrm_intf_min = HWRM_VERSION_MINOR;
8065 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
8066
8067 mutex_lock(&bp->hwrm_cmd_lock);
8068 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8069 if (rc)
8070 goto hwrm_ver_get_exit;
8071
8072 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
8073
8074 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
8075 resp->hwrm_intf_min_8b << 8 |
8076 resp->hwrm_intf_upd_8b;
8077 if (resp->hwrm_intf_maj_8b < 1) {
8078 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
8079 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8080 resp->hwrm_intf_upd_8b);
8081 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
8082 }
8083
8084 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
8085 HWRM_VERSION_UPDATE;
8086
8087 if (bp->hwrm_spec_code > hwrm_ver)
8088 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8089 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
8090 HWRM_VERSION_UPDATE);
8091 else
8092 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8093 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8094 resp->hwrm_intf_upd_8b);
8095
8096 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
8097 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
8098 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
8099 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
8100 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
8101 len = FW_VER_STR_LEN;
8102 } else {
8103 fw_maj = resp->hwrm_fw_maj_8b;
8104 fw_min = resp->hwrm_fw_min_8b;
8105 fw_bld = resp->hwrm_fw_bld_8b;
8106 fw_rsv = resp->hwrm_fw_rsvd_8b;
8107 len = BC_HWRM_STR_LEN;
8108 }
8109 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
8110 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
8111 fw_rsv);
8112
8113 if (strlen(resp->active_pkg_name)) {
8114 int fw_ver_len = strlen(bp->fw_ver_str);
8115
8116 snprintf(bp->fw_ver_str + fw_ver_len,
8117 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8118 resp->active_pkg_name);
8119 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8120 }
8121
8122 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8123 if (!bp->hwrm_cmd_timeout)
8124 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8125
8126 if (resp->hwrm_intf_maj_8b >= 1) {
8127 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
8128 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8129 }
8130 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8131 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
8132
8133 bp->chip_num = le16_to_cpu(resp->chip_num);
8134 bp->chip_rev = resp->chip_rev;
8135 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8136 !resp->chip_metal)
8137 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8138
8139 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8140 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8141 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8142 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8143
8144 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8145 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8146
8147 if (dev_caps_cfg &
8148 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8149 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8150
8151 if (dev_caps_cfg &
8152 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8153 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8154
8155 if (dev_caps_cfg &
8156 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8157 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8158
8159 hwrm_ver_get_exit:
8160 mutex_unlock(&bp->hwrm_cmd_lock);
8161 return rc;
8162 }
8163
8164 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8165 {
8166 struct hwrm_fw_set_time_input req = {0};
8167 struct tm tm;
8168 time64_t now = ktime_get_real_seconds();
8169
8170 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8171 bp->hwrm_spec_code < 0x10400)
8172 return -EOPNOTSUPP;
8173
8174 time64_to_tm(now, 0, &tm);
8175 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
8176 req.year = cpu_to_le16(1900 + tm.tm_year);
8177 req.month = 1 + tm.tm_mon;
8178 req.day = tm.tm_mday;
8179 req.hour = tm.tm_hour;
8180 req.minute = tm.tm_min;
8181 req.second = tm.tm_sec;
8182 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8183 }
8184
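/* Accumulate a hardware counter of limited width into a 64-bit
 * software counter, accounting for wraparound of the masked value.
 */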
8185 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8186 {
8187 u64 sw_tmp;
8188
8189 hw &= mask;
8190 sw_tmp = (*sw & ~mask) | hw;
8191 if (hw < (*sw & mask))
8192 sw_tmp += mask + 1;
8193 WRITE_ONCE(*sw, sw_tmp);
8194 }
8195
8196 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8197 int count, bool ignore_zero)
8198 {
8199 int i;
8200
8201 for (i = 0; i < count; i++) {
8202 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8203
8204 if (ignore_zero && !hw)
8205 continue;
8206
8207 if (masks[i] == -1ULL)
8208 sw_stats[i] = hw;
8209 else
8210 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8211 }
8212 }
8213
8214 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8215 {
8216 if (!stats->hw_stats)
8217 return;
8218
8219 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8220 stats->hw_masks, stats->len / 8, false);
8221 }
8222
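/* Fold the latest DMA'ed hardware counters from every completion ring
 * and, when enabled, the port statistics blocks into their 64-bit
 * software mirrors.
 */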
8223 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8224 {
8225 struct bnxt_stats_mem *ring0_stats;
8226 bool ignore_zero = false;
8227 int i;
8228
8229 /* Chip bug. Counter intermittently becomes 0. */
8230 if (bp->flags & BNXT_FLAG_CHIP_P5)
8231 ignore_zero = true;
8232
8233 for (i = 0; i < bp->cp_nr_rings; i++) {
8234 struct bnxt_napi *bnapi = bp->bnapi[i];
8235 struct bnxt_cp_ring_info *cpr;
8236 struct bnxt_stats_mem *stats;
8237
8238 cpr = &bnapi->cp_ring;
8239 stats = &cpr->stats;
8240 if (!i)
8241 ring0_stats = stats;
8242 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8243 ring0_stats->hw_masks,
8244 ring0_stats->len / 8, ignore_zero);
8245 }
8246 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8247 struct bnxt_stats_mem *stats = &bp->port_stats;
8248 __le64 *hw_stats = stats->hw_stats;
8249 u64 *sw_stats = stats->sw_stats;
8250 u64 *masks = stats->hw_masks;
8251 int cnt;
8252
8253 cnt = sizeof(struct rx_port_stats) / 8;
8254 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8255
8256 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8257 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8258 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8259 cnt = sizeof(struct tx_port_stats) / 8;
8260 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8261 }
8262 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8263 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8264 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8265 }
8266 }
8267
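/* Ask firmware to DMA the current port RX/TX counters into the
 * port_stats host buffer.
 */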
8268 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8269 {
8270 struct bnxt_pf_info *pf = &bp->pf;
8271 struct hwrm_port_qstats_input req = {0};
8272
8273 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8274 return 0;
8275
8276 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8277 return -EOPNOTSUPP;
8278
8279 req.flags = flags;
8280 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
8281 req.port_id = cpu_to_le16(pf->port_id);
8282 req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8283 BNXT_TX_PORT_STATS_BYTE_OFFSET);
8284 req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8285 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8286 }
8287
8288 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8289 {
8290 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
8291 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
8292 struct hwrm_port_qstats_ext_input req = {0};
8293 struct bnxt_pf_info *pf = &bp->pf;
8294 u32 tx_stat_size;
8295 int rc;
8296
8297 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8298 return 0;
8299
8300 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8301 return -EOPNOTSUPP;
8302
8303 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
8304 req.flags = flags;
8305 req.port_id = cpu_to_le16(pf->port_id);
8306 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8307 req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8308 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8309 sizeof(struct tx_port_stats_ext) : 0;
8310 req.tx_stat_size = cpu_to_le16(tx_stat_size);
8311 req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8312 mutex_lock(&bp->hwrm_cmd_lock);
8313 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8314 if (!rc) {
8315 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
8316 bp->fw_tx_stats_ext_size = tx_stat_size ?
8317 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
8318 } else {
8319 bp->fw_rx_stats_ext_size = 0;
8320 bp->fw_tx_stats_ext_size = 0;
8321 }
8322 if (flags)
8323 goto qstats_done;
8324
8325 if (bp->fw_tx_stats_ext_size <=
8326 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8327 mutex_unlock(&bp->hwrm_cmd_lock);
8328 bp->pri2cos_valid = 0;
8329 return rc;
8330 }
8331
8332 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
8333 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8334
8335 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
8336 if (!rc) {
8337 struct hwrm_queue_pri2cos_qcfg_output *resp2;
8338 u8 *pri2cos;
8339 int i, j;
8340
8341 resp2 = bp->hwrm_cmd_resp_addr;
8342 pri2cos = &resp2->pri0_cos_queue_id;
8343 for (i = 0; i < 8; i++) {
8344 u8 queue_id = pri2cos[i];
8345 u8 queue_idx;
8346
8347 /* Per port queue IDs start from 0, 10, 20, etc */
8348 queue_idx = queue_id % 10;
8349 if (queue_idx > BNXT_MAX_QUEUE) {
8350 bp->pri2cos_valid = false;
8351 goto qstats_done;
8352 }
8353 for (j = 0; j < bp->max_q; j++) {
8354 if (bp->q_ids[j] == queue_id)
8355 bp->pri2cos_idx[i] = queue_idx;
8356 }
8357 }
8358 bp->pri2cos_valid = 1;
8359 }
8360 qstats_done:
8361 mutex_unlock(&bp->hwrm_cmd_lock);
8362 return rc;
8363 }
8364
8365 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8366 {
8367 if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
8368 bnxt_hwrm_tunnel_dst_port_free(
8369 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8370 if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
8371 bnxt_hwrm_tunnel_dst_port_free(
8372 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8373 }
8374
8375 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8376 {
8377 int rc, i;
8378 u32 tpa_flags = 0;
8379
8380 if (set_tpa)
8381 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8382 else if (BNXT_NO_FW_ACCESS(bp))
8383 return 0;
8384 for (i = 0; i < bp->nr_vnics; i++) {
8385 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8386 if (rc) {
8387 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8388 i, rc);
8389 return rc;
8390 }
8391 }
8392 return 0;
8393 }
8394
8395 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8396 {
8397 int i;
8398
8399 for (i = 0; i < bp->nr_vnics; i++)
8400 bnxt_hwrm_vnic_set_rss(bp, i, false);
8401 }
8402
8403 static void bnxt_clear_vnic(struct bnxt *bp)
8404 {
8405 if (!bp->vnic_info)
8406 return;
8407
8408 bnxt_hwrm_clear_vnic_filter(bp);
8409 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8410 /* clear all RSS settings before freeing the vnic ctx */
8411 bnxt_hwrm_clear_vnic_rss(bp);
8412 bnxt_hwrm_vnic_ctx_free(bp);
8413 }
8414 /* undo the vnic tpa settings before freeing the vnic */
8415 if (bp->flags & BNXT_FLAG_TPA)
8416 bnxt_set_tpa(bp, false);
8417 bnxt_hwrm_vnic_free(bp);
8418 if (bp->flags & BNXT_FLAG_CHIP_P5)
8419 bnxt_hwrm_vnic_ctx_free(bp);
8420 }
8421
8422 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8423 bool irq_re_init)
8424 {
8425 bnxt_clear_vnic(bp);
8426 bnxt_hwrm_ring_free(bp, close_path);
8427 bnxt_hwrm_ring_grp_free(bp);
8428 if (irq_re_init) {
8429 bnxt_hwrm_stat_ctx_free(bp);
8430 bnxt_hwrm_free_tunnel_ports(bp);
8431 }
8432 }
8433
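/* Set the function's EVB (bridge) mode to VEB or VEPA via
 * HWRM_FUNC_CFG.
 */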
8434 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8435 {
8436 struct hwrm_func_cfg_input req = {0};
8437
8438 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8439 req.fid = cpu_to_le16(0xffff);
8440 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8441 if (br_mode == BRIDGE_MODE_VEB)
8442 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8443 else if (br_mode == BRIDGE_MODE_VEPA)
8444 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8445 else
8446 return -EINVAL;
8447 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8448 }
8449
8450 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8451 {
8452 struct hwrm_func_cfg_input req = {0};
8453
8454 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8455 return 0;
8456
8457 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8458 req.fid = cpu_to_le16(0xffff);
8459 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8460 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8461 if (size == 128)
8462 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8463
8464 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8465 }
8466
8467 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8468 {
8469 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8470 int rc;
8471
8472 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8473 goto skip_rss_ctx;
8474
8475 /* allocate context for vnic */
8476 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8477 if (rc) {
8478 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8479 vnic_id, rc);
8480 goto vnic_setup_err;
8481 }
8482 bp->rsscos_nr_ctxs++;
8483
8484 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8485 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8486 if (rc) {
8487 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8488 vnic_id, rc);
8489 goto vnic_setup_err;
8490 }
8491 bp->rsscos_nr_ctxs++;
8492 }
8493
8494 skip_rss_ctx:
8495 /* configure default vnic, ring grp */
8496 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8497 if (rc) {
8498 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8499 vnic_id, rc);
8500 goto vnic_setup_err;
8501 }
8502
8503 /* Enable RSS hashing on vnic */
8504 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8505 if (rc) {
8506 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8507 vnic_id, rc);
8508 goto vnic_setup_err;
8509 }
8510
8511 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8512 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8513 if (rc) {
8514 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8515 vnic_id, rc);
8516 }
8517 }
8518
8519 vnic_setup_err:
8520 return rc;
8521 }
8522
8523 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8524 {
8525 int rc, i, nr_ctxs;
8526
8527 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8528 for (i = 0; i < nr_ctxs; i++) {
8529 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8530 if (rc) {
8531 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8532 vnic_id, i, rc);
8533 break;
8534 }
8535 bp->rsscos_nr_ctxs++;
8536 }
8537 if (i < nr_ctxs)
8538 return -ENOMEM;
8539
8540 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8541 if (rc) {
8542 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8543 vnic_id, rc);
8544 return rc;
8545 }
8546 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8547 if (rc) {
8548 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8549 vnic_id, rc);
8550 return rc;
8551 }
8552 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8553 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8554 if (rc) {
8555 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8556 vnic_id, rc);
8557 }
8558 }
8559 return rc;
8560 }
8561
8562 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8563 {
8564 if (bp->flags & BNXT_FLAG_CHIP_P5)
8565 return __bnxt_setup_vnic_p5(bp, vnic_id);
8566 else
8567 return __bnxt_setup_vnic(bp, vnic_id);
8568 }
8569
8570 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8571 {
8572 #ifdef CONFIG_RFS_ACCEL
8573 int i, rc = 0;
8574
8575 if (bp->flags & BNXT_FLAG_CHIP_P5)
8576 return 0;
8577
8578 for (i = 0; i < bp->rx_nr_rings; i++) {
8579 struct bnxt_vnic_info *vnic;
8580 u16 vnic_id = i + 1;
8581 u16 ring_id = i;
8582
8583 if (vnic_id >= bp->nr_vnics)
8584 break;
8585
8586 vnic = &bp->vnic_info[vnic_id];
8587 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8588 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8589 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8590 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8591 if (rc) {
8592 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8593 vnic_id, rc);
8594 break;
8595 }
8596 rc = bnxt_setup_vnic(bp, vnic_id);
8597 if (rc)
8598 break;
8599 }
8600 return rc;
8601 #else
8602 return 0;
8603 #endif
8604 }
8605
8606 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8607 static bool bnxt_promisc_ok(struct bnxt *bp)
8608 {
8609 #ifdef CONFIG_BNXT_SRIOV
8610 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8611 return false;
8612 #endif
8613 return true;
8614 }
8615
8616 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8617 {
8618 unsigned int rc = 0;
8619
8620 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8621 if (rc) {
8622 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8623 rc);
8624 return rc;
8625 }
8626
8627 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8628 if (rc) {
8629 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
8630 rc);
8631 return rc;
8632 }
8633 return rc;
8634 }
8635
8636 static int bnxt_cfg_rx_mode(struct bnxt *);
8637 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8638
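/* Bring up the control path: allocate stat contexts, HW rings and
 * ring groups, set up the default VNIC with its RSS/TPA settings and
 * MAC filter, then program the RX mode and interrupt coalescing.
 */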
8639 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8640 {
8641 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8642 int rc = 0;
8643 unsigned int rx_nr_rings = bp->rx_nr_rings;
8644
8645 if (irq_re_init) {
8646 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8647 if (rc) {
8648 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8649 rc);
8650 goto err_out;
8651 }
8652 }
8653
8654 rc = bnxt_hwrm_ring_alloc(bp);
8655 if (rc) {
8656 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8657 goto err_out;
8658 }
8659
8660 rc = bnxt_hwrm_ring_grp_alloc(bp);
8661 if (rc) {
8662 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8663 goto err_out;
8664 }
8665
8666 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8667 rx_nr_rings--;
8668
8669 /* default vnic 0 */
8670 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8671 if (rc) {
8672 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8673 goto err_out;
8674 }
8675
8676 rc = bnxt_setup_vnic(bp, 0);
8677 if (rc)
8678 goto err_out;
8679
8680 if (bp->flags & BNXT_FLAG_RFS) {
8681 rc = bnxt_alloc_rfs_vnics(bp);
8682 if (rc)
8683 goto err_out;
8684 }
8685
8686 if (bp->flags & BNXT_FLAG_TPA) {
8687 rc = bnxt_set_tpa(bp, true);
8688 if (rc)
8689 goto err_out;
8690 }
8691
8692 if (BNXT_VF(bp))
8693 bnxt_update_vf_mac(bp);
8694
8695 /* Filter for default vnic 0 */
8696 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8697 if (rc) {
8698 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8699 goto err_out;
8700 }
8701 vnic->uc_filter_count = 1;
8702
8703 vnic->rx_mask = 0;
8704 if (bp->dev->flags & IFF_BROADCAST)
8705 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8706
8707 if (bp->dev->flags & IFF_PROMISC)
8708 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8709
8710 if (bp->dev->flags & IFF_ALLMULTI) {
8711 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8712 vnic->mc_list_count = 0;
8713 } else {
8714 u32 mask = 0;
8715
8716 bnxt_mc_list_updated(bp, &mask);
8717 vnic->rx_mask |= mask;
8718 }
8719
8720 rc = bnxt_cfg_rx_mode(bp);
8721 if (rc)
8722 goto err_out;
8723
8724 rc = bnxt_hwrm_set_coal(bp);
8725 if (rc)
8726 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8727 rc);
8728
8729 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8730 rc = bnxt_setup_nitroa0_vnic(bp);
8731 if (rc)
8732 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8733 rc);
8734 }
8735
8736 if (BNXT_VF(bp)) {
8737 bnxt_hwrm_func_qcfg(bp);
8738 netdev_update_features(bp->dev);
8739 }
8740
8741 return 0;
8742
8743 err_out:
8744 bnxt_hwrm_resource_free(bp, 0, true);
8745
8746 return rc;
8747 }
8748
8749 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8750 {
8751 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8752 return 0;
8753 }
8754
8755 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8756 {
8757 bnxt_init_cp_rings(bp);
8758 bnxt_init_rx_rings(bp);
8759 bnxt_init_tx_rings(bp);
8760 bnxt_init_ring_grps(bp, irq_re_init);
8761 bnxt_init_vnics(bp);
8762
8763 return bnxt_init_chip(bp, irq_re_init);
8764 }
8765
8766 static int bnxt_set_real_num_queues(struct bnxt *bp)
8767 {
8768 int rc;
8769 struct net_device *dev = bp->dev;
8770
8771 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8772 bp->tx_nr_rings_xdp);
8773 if (rc)
8774 return rc;
8775
8776 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8777 if (rc)
8778 return rc;
8779
8780 #ifdef CONFIG_RFS_ACCEL
8781 if (bp->flags & BNXT_FLAG_RFS)
8782 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8783 #endif
8784
8785 return rc;
8786 }
8787
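/* Fit the requested RX and TX ring counts into @max.  Shared rings
 * are simply clamped; otherwise the larger of the two counts is
 * reduced until the pair fits.
 */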
8788 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8789 bool shared)
8790 {
8791 int _rx = *rx, _tx = *tx;
8792
8793 if (shared) {
8794 *rx = min_t(int, _rx, max);
8795 *tx = min_t(int, _tx, max);
8796 } else {
8797 if (max < 2)
8798 return -ENOMEM;
8799
8800 while (_rx + _tx > max) {
8801 if (_rx > _tx && _rx > 1)
8802 _rx--;
8803 else if (_tx > 1)
8804 _tx--;
8805 }
8806 *rx = _rx;
8807 *tx = _tx;
8808 }
8809 return 0;
8810 }
8811
8812 static void bnxt_setup_msix(struct bnxt *bp)
8813 {
8814 const int len = sizeof(bp->irq_tbl[0].name);
8815 struct net_device *dev = bp->dev;
8816 int tcs, i;
8817
8818 tcs = netdev_get_num_tc(dev);
8819 if (tcs) {
8820 int i, off, count;
8821
8822 for (i = 0; i < tcs; i++) {
8823 count = bp->tx_nr_rings_per_tc;
8824 off = i * count;
8825 netdev_set_tc_queue(dev, i, count, off);
8826 }
8827 }
8828
8829 for (i = 0; i < bp->cp_nr_rings; i++) {
8830 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8831 char *attr;
8832
8833 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8834 attr = "TxRx";
8835 else if (i < bp->rx_nr_rings)
8836 attr = "rx";
8837 else
8838 attr = "tx";
8839
8840 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8841 attr, i);
8842 bp->irq_tbl[map_idx].handler = bnxt_msix;
8843 }
8844 }
8845
8846 static void bnxt_setup_inta(struct bnxt *bp)
8847 {
8848 const int len = sizeof(bp->irq_tbl[0].name);
8849
8850 if (netdev_get_num_tc(bp->dev))
8851 netdev_reset_tc(bp->dev);
8852
8853 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8854 0);
8855 bp->irq_tbl[0].handler = bnxt_inta;
8856 }
8857
8858 static int bnxt_init_int_mode(struct bnxt *bp);
8859
8860 static int bnxt_setup_int_mode(struct bnxt *bp)
8861 {
8862 int rc;
8863
8864 if (!bp->irq_tbl) {
8865 rc = bnxt_init_int_mode(bp);
8866 if (rc || !bp->irq_tbl)
8867 return rc ?: -ENODEV;
8868 }
8869
8870 if (bp->flags & BNXT_FLAG_USING_MSIX)
8871 bnxt_setup_msix(bp);
8872 else
8873 bnxt_setup_inta(bp);
8874
8875 rc = bnxt_set_real_num_queues(bp);
8876 return rc;
8877 }
8878
8879 #ifdef CONFIG_RFS_ACCEL
8880 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8881 {
8882 return bp->hw_resc.max_rsscos_ctxs;
8883 }
8884
8885 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8886 {
8887 return bp->hw_resc.max_vnics;
8888 }
8889 #endif
8890
8891 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8892 {
8893 return bp->hw_resc.max_stat_ctxs;
8894 }
8895
8896 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8897 {
8898 return bp->hw_resc.max_cp_rings;
8899 }
8900
8901 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8902 {
8903 unsigned int cp = bp->hw_resc.max_cp_rings;
8904
8905 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8906 cp -= bnxt_get_ulp_msix_num(bp);
8907
8908 return cp;
8909 }
8910
8911 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8912 {
8913 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8914
8915 if (bp->flags & BNXT_FLAG_CHIP_P5)
8916 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8917
8918 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8919 }
8920
8921 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8922 {
8923 bp->hw_resc.max_irqs = max_irqs;
8924 }
8925
8926 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8927 {
8928 unsigned int cp;
8929
8930 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8931 if (bp->flags & BNXT_FLAG_CHIP_P5)
8932 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8933 else
8934 return cp - bp->cp_nr_rings;
8935 }
8936
8937 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8938 {
8939 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8940 }
8941
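/* Return how many additional MSI-X vectors can be made available to a
 * ULP such as the RDMA driver, limited by the vectors already enabled
 * and, with the new resource manager, by the function's max IRQs.
 */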
8942 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8943 {
8944 int max_cp = bnxt_get_max_func_cp_rings(bp);
8945 int max_irq = bnxt_get_max_func_irqs(bp);
8946 int total_req = bp->cp_nr_rings + num;
8947 int max_idx, avail_msix;
8948
8949 max_idx = bp->total_irqs;
8950 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8951 max_idx = min_t(int, bp->total_irqs, max_cp);
8952 avail_msix = max_idx - bp->cp_nr_rings;
8953 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8954 return avail_msix;
8955
8956 if (max_irq < total_req) {
8957 num = max_irq - bp->cp_nr_rings;
8958 if (num <= 0)
8959 return 0;
8960 }
8961 return num;
8962 }
8963
8964 static int bnxt_get_num_msix(struct bnxt *bp)
8965 {
8966 if (!BNXT_NEW_RM(bp))
8967 return bnxt_get_max_func_irqs(bp);
8968
8969 return bnxt_nq_rings_in_use(bp);
8970 }
8971
8972 static int bnxt_init_msix(struct bnxt *bp)
8973 {
8974 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8975 struct msix_entry *msix_ent;
8976
8977 total_vecs = bnxt_get_num_msix(bp);
8978 max = bnxt_get_max_func_irqs(bp);
8979 if (total_vecs > max)
8980 total_vecs = max;
8981
8982 if (!total_vecs)
8983 return 0;
8984
8985 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8986 if (!msix_ent)
8987 return -ENOMEM;
8988
8989 for (i = 0; i < total_vecs; i++) {
8990 msix_ent[i].entry = i;
8991 msix_ent[i].vector = 0;
8992 }
8993
8994 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8995 min = 2;
8996
8997 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8998 ulp_msix = bnxt_get_ulp_msix_num(bp);
8999 if (total_vecs < 0 || total_vecs < ulp_msix) {
9000 rc = -ENODEV;
9001 goto msix_setup_exit;
9002 }
9003
9004 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
9005 if (bp->irq_tbl) {
9006 for (i = 0; i < total_vecs; i++)
9007 bp->irq_tbl[i].vector = msix_ent[i].vector;
9008
9009 bp->total_irqs = total_vecs;
9010 /* Trim rings based on the number of vectors allocated */
9011 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
9012 total_vecs - ulp_msix, min == 1);
9013 if (rc)
9014 goto msix_setup_exit;
9015
9016 bp->cp_nr_rings = (min == 1) ?
9017 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9018 bp->tx_nr_rings + bp->rx_nr_rings;
9019
9020 } else {
9021 rc = -ENOMEM;
9022 goto msix_setup_exit;
9023 }
9024 bp->flags |= BNXT_FLAG_USING_MSIX;
9025 kfree(msix_ent);
9026 return 0;
9027
9028 msix_setup_exit:
9029 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
9030 kfree(bp->irq_tbl);
9031 bp->irq_tbl = NULL;
9032 pci_disable_msix(bp->pdev);
9033 kfree(msix_ent);
9034 return rc;
9035 }
9036
9037 static int bnxt_init_inta(struct bnxt *bp)
9038 {
9039 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
9040 if (!bp->irq_tbl)
9041 return -ENOMEM;
9042
9043 bp->total_irqs = 1;
9044 bp->rx_nr_rings = 1;
9045 bp->tx_nr_rings = 1;
9046 bp->cp_nr_rings = 1;
9047 bp->flags |= BNXT_FLAG_SHARED_RINGS;
9048 bp->irq_tbl[0].vector = bp->pdev->irq;
9049 return 0;
9050 }
9051
9052 static int bnxt_init_int_mode(struct bnxt *bp)
9053 {
9054 int rc = -ENODEV;
9055
9056 if (bp->flags & BNXT_FLAG_MSIX_CAP)
9057 rc = bnxt_init_msix(bp);
9058
9059 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
9060 /* fallback to INTA */
9061 rc = bnxt_init_inta(bp);
9062 }
9063 return rc;
9064 }
9065
9066 static void bnxt_clear_int_mode(struct bnxt *bp)
9067 {
9068 if (bp->flags & BNXT_FLAG_USING_MSIX)
9069 pci_disable_msix(bp->pdev);
9070
9071 kfree(bp->irq_tbl);
9072 bp->irq_tbl = NULL;
9073 bp->flags &= ~BNXT_FLAG_USING_MSIX;
9074 }
9075
9076 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
9077 {
9078 int tcs = netdev_get_num_tc(bp->dev);
9079 bool irq_cleared = false;
9080 int rc;
9081
9082 if (!bnxt_need_reserve_rings(bp))
9083 return 0;
9084
9085 if (irq_re_init && BNXT_NEW_RM(bp) &&
9086 bnxt_get_num_msix(bp) != bp->total_irqs) {
9087 bnxt_ulp_irq_stop(bp);
9088 bnxt_clear_int_mode(bp);
9089 irq_cleared = true;
9090 }
9091 rc = __bnxt_reserve_rings(bp);
9092 if (irq_cleared) {
9093 if (!rc)
9094 rc = bnxt_init_int_mode(bp);
9095 bnxt_ulp_irq_restart(bp, rc);
9096 }
9097 if (rc) {
9098 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9099 return rc;
9100 }
9101 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
9102 netdev_err(bp->dev, "tx ring reservation failure\n");
9103 netdev_reset_tc(bp->dev);
9104 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9105 return -ENOMEM;
9106 }
9107 return 0;
9108 }
9109
9110 static void bnxt_free_irq(struct bnxt *bp)
9111 {
9112 struct bnxt_irq *irq;
9113 int i;
9114
9115 #ifdef CONFIG_RFS_ACCEL
9116 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9117 bp->dev->rx_cpu_rmap = NULL;
9118 #endif
9119 if (!bp->irq_tbl || !bp->bnapi)
9120 return;
9121
9122 for (i = 0; i < bp->cp_nr_rings; i++) {
9123 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9124
9125 irq = &bp->irq_tbl[map_idx];
9126 if (irq->requested) {
9127 if (irq->have_cpumask) {
9128 irq_set_affinity_hint(irq->vector, NULL);
9129 free_cpumask_var(irq->cpu_mask);
9130 irq->have_cpumask = 0;
9131 }
9132 free_irq(irq->vector, bp->bnapi[i]);
9133 }
9134
9135 irq->requested = 0;
9136 }
9137 }
9138
9139 static int bnxt_request_irq(struct bnxt *bp)
9140 {
9141 int i, j, rc = 0;
9142 unsigned long flags = 0;
9143 #ifdef CONFIG_RFS_ACCEL
9144 struct cpu_rmap *rmap;
9145 #endif
9146
9147 rc = bnxt_setup_int_mode(bp);
9148 if (rc) {
9149 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9150 rc);
9151 return rc;
9152 }
9153 #ifdef CONFIG_RFS_ACCEL
9154 rmap = bp->dev->rx_cpu_rmap;
9155 #endif
9156 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9157 flags = IRQF_SHARED;
9158
9159 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9160 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9161 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9162
9163 #ifdef CONFIG_RFS_ACCEL
9164 if (rmap && bp->bnapi[i]->rx_ring) {
9165 rc = irq_cpu_rmap_add(rmap, irq->vector);
9166 if (rc)
9167 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9168 j);
9169 j++;
9170 }
9171 #endif
9172 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9173 bp->bnapi[i]);
9174 if (rc)
9175 break;
9176
9177 irq->requested = 1;
9178
9179 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9180 int numa_node = dev_to_node(&bp->pdev->dev);
9181
9182 irq->have_cpumask = 1;
9183 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9184 irq->cpu_mask);
9185 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9186 if (rc) {
9187 netdev_warn(bp->dev,
9188 "Set affinity failed, IRQ = %d\n",
9189 irq->vector);
9190 break;
9191 }
9192 }
9193 }
9194 return rc;
9195 }
9196
9197 static void bnxt_del_napi(struct bnxt *bp)
9198 {
9199 int i;
9200
9201 if (!bp->bnapi)
9202 return;
9203
9204 for (i = 0; i < bp->cp_nr_rings; i++) {
9205 struct bnxt_napi *bnapi = bp->bnapi[i];
9206
9207 __netif_napi_del(&bnapi->napi);
9208 }
9209 /* We called __netif_napi_del(); we must respect an RCU grace
9210 * period before freeing the napi structures.
9211 */
9212 synchronize_net();
9213 }
9214
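/* Register one NAPI instance per completion ring, selecting the poll
 * handler for P5, Nitro A0 or legacy chips; with INTA a single shared
 * instance is used.
 */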
9215 static void bnxt_init_napi(struct bnxt *bp)
9216 {
9217 int i;
9218 unsigned int cp_nr_rings = bp->cp_nr_rings;
9219 struct bnxt_napi *bnapi;
9220
9221 if (bp->flags & BNXT_FLAG_USING_MSIX) {
9222 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9223
9224 if (bp->flags & BNXT_FLAG_CHIP_P5)
9225 poll_fn = bnxt_poll_p5;
9226 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9227 cp_nr_rings--;
9228 for (i = 0; i < cp_nr_rings; i++) {
9229 bnapi = bp->bnapi[i];
9230 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
9231 }
9232 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9233 bnapi = bp->bnapi[cp_nr_rings];
9234 netif_napi_add(bp->dev, &bnapi->napi,
9235 bnxt_poll_nitroa0, 64);
9236 }
9237 } else {
9238 bnapi = bp->bnapi[0];
9239 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9240 }
9241 }
9242
9243 static void bnxt_disable_napi(struct bnxt *bp)
9244 {
9245 int i;
9246
9247 if (!bp->bnapi ||
9248 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9249 return;
9250
9251 for (i = 0; i < bp->cp_nr_rings; i++) {
9252 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9253
9254 napi_disable(&bp->bnapi[i]->napi);
9255 if (bp->bnapi[i]->rx_ring)
9256 cancel_work_sync(&cpr->dim.work);
9257 }
9258 }
9259
9260 static void bnxt_enable_napi(struct bnxt *bp)
9261 {
9262 int i;
9263
9264 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9265 for (i = 0; i < bp->cp_nr_rings; i++) {
9266 struct bnxt_napi *bnapi = bp->bnapi[i];
9267 struct bnxt_cp_ring_info *cpr;
9268
9269 cpr = &bnapi->cp_ring;
9270 if (bnapi->in_reset)
9271 cpr->sw_stats.rx.rx_resets++;
9272 bnapi->in_reset = false;
9273
9274 if (bnapi->rx_ring) {
9275 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9276 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9277 }
9278 napi_enable(&bnapi->napi);
9279 }
9280 }
9281
9282 void bnxt_tx_disable(struct bnxt *bp)
9283 {
9284 int i;
9285 struct bnxt_tx_ring_info *txr;
9286
9287 if (bp->tx_ring) {
9288 for (i = 0; i < bp->tx_nr_rings; i++) {
9289 txr = &bp->tx_ring[i];
9290 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9291 }
9292 }
9293 /* Make sure napi polls see @dev_state change */
9294 synchronize_net();
9295 /* Drop carrier first to prevent TX timeout */
9296 netif_carrier_off(bp->dev);
9297 /* Stop all TX queues */
9298 netif_tx_disable(bp->dev);
9299 }
9300
9301 void bnxt_tx_enable(struct bnxt *bp)
9302 {
9303 int i;
9304 struct bnxt_tx_ring_info *txr;
9305
9306 for (i = 0; i < bp->tx_nr_rings; i++) {
9307 txr = &bp->tx_ring[i];
9308 WRITE_ONCE(txr->dev_state, 0);
9309 }
9310 /* Make sure napi polls see @dev_state change */
9311 synchronize_net();
9312 netif_tx_wake_all_queues(bp->dev);
9313 if (bp->link_info.link_up)
9314 netif_carrier_on(bp->dev);
9315 }
9316
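/* Translate the active FEC mode reported by the firmware into a
 * human-readable string for logging.
 */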
9317 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9318 {
9319 u8 active_fec = link_info->active_fec_sig_mode &
9320 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9321
9322 switch (active_fec) {
9323 default:
9324 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9325 return "None";
9326 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9327 return "Clause 74 BaseR";
9328 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9329 return "Clause 91 RS(528,514)";
9330 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9331 return "Clause 91 RS544_1XN";
9332 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9333 return "Clause 91 RS(544,514)";
9334 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9335 return "Clause 91 RS272_1XN";
9336 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9337 return "Clause 91 RS(272,257)";
9338 }
9339 }
9340
9341 static void bnxt_report_link(struct bnxt *bp)
9342 {
9343 if (bp->link_info.link_up) {
9344 const char *signal = "";
9345 const char *flow_ctrl;
9346 const char *duplex;
9347 u32 speed;
9348 u16 fec;
9349
9350 netif_carrier_on(bp->dev);
9351 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9352 if (speed == SPEED_UNKNOWN) {
9353 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9354 return;
9355 }
9356 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9357 duplex = "full";
9358 else
9359 duplex = "half";
9360 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9361 flow_ctrl = "ON - receive & transmit";
9362 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9363 flow_ctrl = "ON - transmit";
9364 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9365 flow_ctrl = "ON - receive";
9366 else
9367 flow_ctrl = "none";
9368 if (bp->link_info.phy_qcfg_resp.option_flags &
9369 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9370 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9371 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9372 switch (sig_mode) {
9373 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9374 signal = "(NRZ) ";
9375 break;
9376 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9377 signal = "(PAM4) ";
9378 break;
9379 default:
9380 break;
9381 }
9382 }
9383 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9384 speed, signal, duplex, flow_ctrl);
9385 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9386 netdev_info(bp->dev, "EEE is %s\n",
9387 bp->eee.eee_active ? "active" :
9388 "not active");
9389 fec = bp->link_info.fec_cfg;
9390 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9391 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9392 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9393 bnxt_report_fec(&bp->link_info));
9394 } else {
9395 netif_carrier_off(bp->dev);
9396 netdev_err(bp->dev, "NIC Link is Down\n");
9397 }
9398 }
9399
9400 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9401 {
9402 if (!resp->supported_speeds_auto_mode &&
9403 !resp->supported_speeds_force_mode &&
9404 !resp->supported_pam4_speeds_auto_mode &&
9405 !resp->supported_pam4_speeds_force_mode)
9406 return true;
9407 return false;
9408 }
9409
9410 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9411 {
9412 int rc = 0;
9413 struct hwrm_port_phy_qcaps_input req = {0};
9414 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9415 struct bnxt_link_info *link_info = &bp->link_info;
9416
9417 if (bp->hwrm_spec_code < 0x10201)
9418 return 0;
9419
9420 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
9421
9422 mutex_lock(&bp->hwrm_cmd_lock);
9423 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9424 if (rc)
9425 goto hwrm_phy_qcaps_exit;
9426
9427 bp->phy_flags = resp->flags;
9428 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9429 struct ethtool_eee *eee = &bp->eee;
9430 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9431
9432 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9433 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9434 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9435 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9436 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9437 }
9438
9439 if (bp->hwrm_spec_code >= 0x10a01) {
9440 if (bnxt_phy_qcaps_no_speed(resp)) {
9441 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9442 netdev_warn(bp->dev, "Ethernet link disabled\n");
9443 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9444 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9445 netdev_info(bp->dev, "Ethernet link enabled\n");
9446 /* Phy re-enabled, reprobe the speeds */
9447 link_info->support_auto_speeds = 0;
9448 link_info->support_pam4_auto_speeds = 0;
9449 }
9450 }
9451 if (resp->supported_speeds_auto_mode)
9452 link_info->support_auto_speeds =
9453 le16_to_cpu(resp->supported_speeds_auto_mode);
9454 if (resp->supported_pam4_speeds_auto_mode)
9455 link_info->support_pam4_auto_speeds =
9456 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9457
9458 bp->port_count = resp->port_cnt;
9459
9460 hwrm_phy_qcaps_exit:
9461 mutex_unlock(&bp->hwrm_cmd_lock);
9462 return rc;
9463 }
9464
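/* Return true if @advertising contains any speed bit that is no longer
 * present in @supported.
 */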
9465 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9466 {
9467 u16 diff = advertising ^ supported;
9468
9469 return ((supported | diff) != supported);
9470 }
9471
9472 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9473 {
9474 int rc = 0;
9475 struct bnxt_link_info *link_info = &bp->link_info;
9476 struct hwrm_port_phy_qcfg_input req = {0};
9477 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9478 u8 link_up = link_info->link_up;
9479 bool support_changed = false;
9480
9481 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
9482
9483 mutex_lock(&bp->hwrm_cmd_lock);
9484 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9485 if (rc) {
9486 mutex_unlock(&bp->hwrm_cmd_lock);
9487 return rc;
9488 }
9489
9490 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9491 link_info->phy_link_status = resp->link;
9492 link_info->duplex = resp->duplex_cfg;
9493 if (bp->hwrm_spec_code >= 0x10800)
9494 link_info->duplex = resp->duplex_state;
9495 link_info->pause = resp->pause;
9496 link_info->auto_mode = resp->auto_mode;
9497 link_info->auto_pause_setting = resp->auto_pause;
9498 link_info->lp_pause = resp->link_partner_adv_pause;
9499 link_info->force_pause_setting = resp->force_pause;
9500 link_info->duplex_setting = resp->duplex_cfg;
9501 if (link_info->phy_link_status == BNXT_LINK_LINK)
9502 link_info->link_speed = le16_to_cpu(resp->link_speed);
9503 else
9504 link_info->link_speed = 0;
9505 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9506 link_info->force_pam4_link_speed =
9507 le16_to_cpu(resp->force_pam4_link_speed);
9508 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9509 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9510 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9511 link_info->auto_pam4_link_speeds =
9512 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9513 link_info->lp_auto_link_speeds =
9514 le16_to_cpu(resp->link_partner_adv_speeds);
9515 link_info->lp_auto_pam4_link_speeds =
9516 resp->link_partner_pam4_adv_speeds;
9517 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9518 link_info->phy_ver[0] = resp->phy_maj;
9519 link_info->phy_ver[1] = resp->phy_min;
9520 link_info->phy_ver[2] = resp->phy_bld;
9521 link_info->media_type = resp->media_type;
9522 link_info->phy_type = resp->phy_type;
9523 link_info->transceiver = resp->xcvr_pkg_type;
9524 link_info->phy_addr = resp->eee_config_phy_addr &
9525 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9526 link_info->module_status = resp->module_status;
9527
9528 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9529 struct ethtool_eee *eee = &bp->eee;
9530 u16 fw_speeds;
9531
9532 eee->eee_active = 0;
9533 if (resp->eee_config_phy_addr &
9534 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9535 eee->eee_active = 1;
9536 fw_speeds = le16_to_cpu(
9537 resp->link_partner_adv_eee_link_speed_mask);
9538 eee->lp_advertised =
9539 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9540 }
9541
9542 /* Pull initial EEE config */
9543 if (!chng_link_state) {
9544 if (resp->eee_config_phy_addr &
9545 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9546 eee->eee_enabled = 1;
9547
9548 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9549 eee->advertised =
9550 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9551
9552 if (resp->eee_config_phy_addr &
9553 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9554 __le32 tmr;
9555
9556 eee->tx_lpi_enabled = 1;
9557 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9558 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9559 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9560 }
9561 }
9562 }
9563
9564 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9565 if (bp->hwrm_spec_code >= 0x10504) {
9566 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9567 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9568 }
9569 /* TODO: need to add more logic to report VF link */
9570 if (chng_link_state) {
9571 if (link_info->phy_link_status == BNXT_LINK_LINK)
9572 link_info->link_up = 1;
9573 else
9574 link_info->link_up = 0;
9575 if (link_up != link_info->link_up)
9576 bnxt_report_link(bp);
9577 } else {
9578 /* always report link down if not required to update the link state */
9579 link_info->link_up = 0;
9580 }
9581 mutex_unlock(&bp->hwrm_cmd_lock);
9582
9583 if (!BNXT_PHY_CFG_ABLE(bp))
9584 return 0;
9585
9586 /* Check if any advertised speeds are no longer supported. The caller
9587 * holds the link_lock mutex, so we can modify link_info settings.
9588 */
9589 if (bnxt_support_dropped(link_info->advertising,
9590 link_info->support_auto_speeds)) {
9591 link_info->advertising = link_info->support_auto_speeds;
9592 support_changed = true;
9593 }
9594 if (bnxt_support_dropped(link_info->advertising_pam4,
9595 link_info->support_pam4_auto_speeds)) {
9596 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9597 support_changed = true;
9598 }
9599 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9600 bnxt_hwrm_set_link_setting(bp, true, false);
9601 return 0;
9602 }
9603
9604 static void bnxt_get_port_module_status(struct bnxt *bp)
9605 {
9606 struct bnxt_link_info *link_info = &bp->link_info;
9607 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9608 u8 module_status;
9609
9610 if (bnxt_update_link(bp, true))
9611 return;
9612
9613 module_status = link_info->module_status;
9614 switch (module_status) {
9615 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9616 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9617 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9618 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9619 bp->pf.port_id);
9620 if (bp->hwrm_spec_code >= 0x10201) {
9621 netdev_warn(bp->dev, "Module part number %s\n",
9622 resp->phy_vendor_partnumber);
9623 }
9624 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9625 netdev_warn(bp->dev, "TX is disabled\n");
9626 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9627 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9628 }
9629 }
9630
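/* Fill in the pause (flow control) fields of a PORT_PHY_CFG request,
 * using autoneg pause when flow control autoneg is enabled and forced
 * pause otherwise.
 */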
9631 static void
9632 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9633 {
9634 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9635 if (bp->hwrm_spec_code >= 0x10201)
9636 req->auto_pause =
9637 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9638 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9639 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9640 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9641 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9642 req->enables |=
9643 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9644 } else {
9645 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9646 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9647 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9648 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9649 req->enables |=
9650 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9651 if (bp->hwrm_spec_code >= 0x10201) {
9652 req->auto_pause = req->force_pause;
9653 req->enables |= cpu_to_le32(
9654 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9655 }
9656 }
9657 }
9658
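/* Fill in the speed fields of a PORT_PHY_CFG request: advertised NRZ/PAM4
 * speed masks when speed autoneg is enabled, or a forced speed otherwise.
 */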
9659 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9660 {
9661 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9662 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9663 if (bp->link_info.advertising) {
9664 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9665 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9666 }
9667 if (bp->link_info.advertising_pam4) {
9668 req->enables |=
9669 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9670 req->auto_link_pam4_speed_mask =
9671 cpu_to_le16(bp->link_info.advertising_pam4);
9672 }
9673 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9674 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9675 } else {
9676 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9677 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9678 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9679 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9680 } else {
9681 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9682 }
9683 }
9684
9685 /* tell chimp that the setting takes effect immediately */
9686 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9687 }
9688
9689 int bnxt_hwrm_set_pause(struct bnxt *bp)
9690 {
9691 struct hwrm_port_phy_cfg_input req = {0};
9692 int rc;
9693
9694 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9695 bnxt_hwrm_set_pause_common(bp, &req);
9696
9697 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9698 bp->link_info.force_link_chng)
9699 bnxt_hwrm_set_link_common(bp, &req);
9700
9701 mutex_lock(&bp->hwrm_cmd_lock);
9702 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9703 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9704 /* Since changing the pause setting doesn't trigger any link
9705 * change event, the driver needs to update the current pause
9706 * result upon successful return of the phy_cfg command.
9707 */
9708 bp->link_info.pause =
9709 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9710 bp->link_info.auto_pause_setting = 0;
9711 if (!bp->link_info.force_link_chng)
9712 bnxt_report_link(bp);
9713 }
9714 bp->link_info.force_link_chng = false;
9715 mutex_unlock(&bp->hwrm_cmd_lock);
9716 return rc;
9717 }
9718
9719 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9720 struct hwrm_port_phy_cfg_input *req)
9721 {
9722 struct ethtool_eee *eee = &bp->eee;
9723
9724 if (eee->eee_enabled) {
9725 u16 eee_speeds;
9726 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9727
9728 if (eee->tx_lpi_enabled)
9729 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9730 else
9731 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9732
9733 req->flags |= cpu_to_le32(flags);
9734 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9735 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9736 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9737 } else {
9738 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9739 }
9740 }
9741
9742 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9743 {
9744 struct hwrm_port_phy_cfg_input req = {0};
9745
9746 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9747 if (set_pause)
9748 bnxt_hwrm_set_pause_common(bp, &req);
9749
9750 bnxt_hwrm_set_link_common(bp, &req);
9751
9752 if (set_eee)
9753 bnxt_hwrm_set_eee(bp, &req);
9754 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9755 }
9756
9757 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9758 {
9759 struct hwrm_port_phy_cfg_input req = {0};
9760
9761 if (!BNXT_SINGLE_PF(bp))
9762 return 0;
9763
9764 if (pci_num_vf(bp->pdev) &&
9765 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9766 return 0;
9767
9768 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9769 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9770 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9771 }
9772
9773 static int bnxt_fw_init_one(struct bnxt *bp);
9774
9775 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9776 {
9777 #ifdef CONFIG_TEE_BNXT_FW
9778 int rc = tee_bnxt_fw_load();
9779
9780 if (rc)
9781 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9782
9783 return rc;
9784 #else
9785 netdev_err(bp->dev, "OP-TEE not supported\n");
9786 return -ENODEV;
9787 #endif
9788 }
9789
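/* Poll the firmware health register until the firmware is no longer booting
 * or recovering; request a reset via OP-TEE if it crashed without a master.
 */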
9790 static int bnxt_try_recover_fw(struct bnxt *bp)
9791 {
9792 if (bp->fw_health && bp->fw_health->status_reliable) {
9793 int retry = 0, rc;
9794 u32 sts;
9795
9796 mutex_lock(&bp->hwrm_cmd_lock);
9797 do {
9798 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9799 rc = bnxt_hwrm_poll(bp);
9800 if (!BNXT_FW_IS_BOOTING(sts) &&
9801 !BNXT_FW_IS_RECOVERING(sts))
9802 break;
9803 retry++;
9804 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9805 mutex_unlock(&bp->hwrm_cmd_lock);
9806
9807 if (!BNXT_FW_IS_HEALTHY(sts)) {
9808 netdev_err(bp->dev,
9809 "Firmware not responding, status: 0x%x\n",
9810 sts);
9811 rc = -ENODEV;
9812 }
9813 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9814 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9815 return bnxt_fw_reset_via_optee(bp);
9816 }
9817 return rc;
9818 }
9819
9820 return -ENODEV;
9821 }
9822
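/* Notify the firmware that the interface is going up or down.  On the up
 * transition, detect a firmware reset or resource change and reinitialize
 * firmware state, context memory and interrupts as needed.
 */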
9823 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9824 {
9825 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9826 struct hwrm_func_drv_if_change_input req = {0};
9827 bool fw_reset = !bp->irq_tbl;
9828 bool resc_reinit = false;
9829 int rc, retry = 0;
9830 u32 flags = 0;
9831
9832 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9833 return 0;
9834
9835 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9836 if (up)
9837 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9838 mutex_lock(&bp->hwrm_cmd_lock);
9839 while (retry < BNXT_FW_IF_RETRY) {
9840 rc = _hwrm_send_message(bp, &req, sizeof(req),
9841 HWRM_CMD_TIMEOUT);
9842 if (rc != -EAGAIN)
9843 break;
9844
9845 msleep(50);
9846 retry++;
9847 }
9848 if (!rc)
9849 flags = le32_to_cpu(resp->flags);
9850 mutex_unlock(&bp->hwrm_cmd_lock);
9851
9852 if (rc == -EAGAIN)
9853 return rc;
9854 if (rc && up) {
9855 rc = bnxt_try_recover_fw(bp);
9856 fw_reset = true;
9857 }
9858 if (rc)
9859 return rc;
9860
9861 if (!up) {
9862 bnxt_inv_fw_health_reg(bp);
9863 return 0;
9864 }
9865
9866 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9867 resc_reinit = true;
9868 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9869 fw_reset = true;
9870 else if (bp->fw_health && !bp->fw_health->status_reliable)
9871 bnxt_try_map_fw_health_reg(bp);
9872
9873 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9874 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9875 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9876 return -ENODEV;
9877 }
9878 if (resc_reinit || fw_reset) {
9879 if (fw_reset) {
9880 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9881 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9882 bnxt_ulp_stop(bp);
9883 bnxt_free_ctx_mem(bp);
9884 kfree(bp->ctx);
9885 bp->ctx = NULL;
9886 bnxt_dcb_free(bp);
9887 rc = bnxt_fw_init_one(bp);
9888 if (rc) {
9889 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9890 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9891 return rc;
9892 }
9893 bnxt_clear_int_mode(bp);
9894 rc = bnxt_init_int_mode(bp);
9895 if (rc) {
9896 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9897 netdev_err(bp->dev, "init int mode failed\n");
9898 return rc;
9899 }
9900 }
9901 if (BNXT_NEW_RM(bp)) {
9902 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9903
9904 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9905 if (rc)
9906 netdev_err(bp->dev, "resc_qcaps failed\n");
9907
9908 hw_resc->resv_cp_rings = 0;
9909 hw_resc->resv_stat_ctxs = 0;
9910 hw_resc->resv_irqs = 0;
9911 hw_resc->resv_tx_rings = 0;
9912 hw_resc->resv_rx_rings = 0;
9913 hw_resc->resv_hw_ring_grps = 0;
9914 hw_resc->resv_vnics = 0;
9915 if (!fw_reset) {
9916 bp->tx_nr_rings = 0;
9917 bp->rx_nr_rings = 0;
9918 }
9919 }
9920 }
9921 return rc;
9922 }
9923
9924 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9925 {
9926 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9927 struct hwrm_port_led_qcaps_input req = {0};
9928 struct bnxt_pf_info *pf = &bp->pf;
9929 int rc;
9930
9931 bp->num_leds = 0;
9932 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9933 return 0;
9934
9935 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9936 req.port_id = cpu_to_le16(pf->port_id);
9937 mutex_lock(&bp->hwrm_cmd_lock);
9938 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9939 if (rc) {
9940 mutex_unlock(&bp->hwrm_cmd_lock);
9941 return rc;
9942 }
9943 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9944 int i;
9945
9946 bp->num_leds = resp->num_leds;
9947 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9948 bp->num_leds);
9949 for (i = 0; i < bp->num_leds; i++) {
9950 struct bnxt_led_info *led = &bp->leds[i];
9951 __le16 caps = led->led_state_caps;
9952
9953 if (!led->led_group_id ||
9954 !BNXT_LED_ALT_BLINK_CAP(caps)) {
9955 bp->num_leds = 0;
9956 break;
9957 }
9958 }
9959 }
9960 mutex_unlock(&bp->hwrm_cmd_lock);
9961 return 0;
9962 }
9963
9964 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9965 {
9966 struct hwrm_wol_filter_alloc_input req = {0};
9967 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9968 int rc;
9969
9970 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9971 req.port_id = cpu_to_le16(bp->pf.port_id);
9972 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9973 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9974 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9975 mutex_lock(&bp->hwrm_cmd_lock);
9976 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9977 if (!rc)
9978 bp->wol_filter_id = resp->wol_filter_id;
9979 mutex_unlock(&bp->hwrm_cmd_lock);
9980 return rc;
9981 }
9982
9983 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9984 {
9985 struct hwrm_wol_filter_free_input req = {0};
9986
9987 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9988 req.port_id = cpu_to_le16(bp->pf.port_id);
9989 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9990 req.wol_filter_id = bp->wol_filter_id;
9991 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9992 }
9993
9994 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9995 {
9996 struct hwrm_wol_filter_qcfg_input req = {0};
9997 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9998 u16 next_handle = 0;
9999 int rc;
10000
10001 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
10002 req.port_id = cpu_to_le16(bp->pf.port_id);
10003 req.handle = cpu_to_le16(handle);
10004 mutex_lock(&bp->hwrm_cmd_lock);
10005 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10006 if (!rc) {
10007 next_handle = le16_to_cpu(resp->next_handle);
10008 if (next_handle != 0) {
10009 if (resp->wol_type ==
10010 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
10011 bp->wol = 1;
10012 bp->wol_filter_id = resp->wol_filter_id;
10013 }
10014 }
10015 }
10016 mutex_unlock(&bp->hwrm_cmd_lock);
10017 return next_handle;
10018 }
10019
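/* Walk the firmware WoL filter list and cache the magic-packet filter ID,
 * if one exists.
 */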
10020 static void bnxt_get_wol_settings(struct bnxt *bp)
10021 {
10022 u16 handle = 0;
10023
10024 bp->wol = 0;
10025 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
10026 return;
10027
10028 do {
10029 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
10030 } while (handle && handle != 0xffff);
10031 }
10032
10033 #ifdef CONFIG_BNXT_HWMON
10034 static ssize_t bnxt_show_temp(struct device *dev,
10035 struct device_attribute *devattr, char *buf)
10036 {
10037 struct hwrm_temp_monitor_query_input req = {0};
10038 struct hwrm_temp_monitor_query_output *resp;
10039 struct bnxt *bp = dev_get_drvdata(dev);
10040 u32 len = 0;
10041 int rc;
10042
10043 resp = bp->hwrm_cmd_resp_addr;
10044 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
10045 mutex_lock(&bp->hwrm_cmd_lock);
10046 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10047 if (!rc)
10048 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
10049 mutex_unlock(&bp->hwrm_cmd_lock);
10050 if (rc)
10051 return rc;
10052 return len;
10053 }
10054 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
10055
10056 static struct attribute *bnxt_attrs[] = {
10057 &sensor_dev_attr_temp1_input.dev_attr.attr,
10058 NULL
10059 };
10060 ATTRIBUTE_GROUPS(bnxt);
10061
10062 static void bnxt_hwmon_close(struct bnxt *bp)
10063 {
10064 if (bp->hwmon_dev) {
10065 hwmon_device_unregister(bp->hwmon_dev);
10066 bp->hwmon_dev = NULL;
10067 }
10068 }
10069
10070 static void bnxt_hwmon_open(struct bnxt *bp)
10071 {
10072 struct hwrm_temp_monitor_query_input req = {0};
10073 struct pci_dev *pdev = bp->pdev;
10074 int rc;
10075
10076 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
10077 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10078 if (rc == -EACCES || rc == -EOPNOTSUPP) {
10079 bnxt_hwmon_close(bp);
10080 return;
10081 }
10082
10083 if (bp->hwmon_dev)
10084 return;
10085
10086 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10087 DRV_MODULE_NAME, bp,
10088 bnxt_groups);
10089 if (IS_ERR(bp->hwmon_dev)) {
10090 bp->hwmon_dev = NULL;
10091 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10092 }
10093 }
10094 #else
10095 static void bnxt_hwmon_close(struct bnxt *bp)
10096 {
10097 }
10098
10099 static void bnxt_hwmon_open(struct bnxt *bp)
10100 {
10101 }
10102 #endif
10103
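/* Check the EEE configuration against the current autoneg settings; return
 * false (after adjusting the EEE state) if the PHY needs to be reconfigured.
 */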
10104 static bool bnxt_eee_config_ok(struct bnxt *bp)
10105 {
10106 struct ethtool_eee *eee = &bp->eee;
10107 struct bnxt_link_info *link_info = &bp->link_info;
10108
10109 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10110 return true;
10111
10112 if (eee->eee_enabled) {
10113 u32 advertising =
10114 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10115
10116 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10117 eee->eee_enabled = 0;
10118 return false;
10119 }
10120 if (eee->advertised & ~advertising) {
10121 eee->advertised = advertising & eee->supported;
10122 return false;
10123 }
10124 }
10125 return true;
10126 }
10127
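/* Compare the requested link, pause and EEE settings with the state reported
 * by the firmware and reconfigure the PHY if anything differs.
 */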
10128 static int bnxt_update_phy_setting(struct bnxt *bp)
10129 {
10130 int rc;
10131 bool update_link = false;
10132 bool update_pause = false;
10133 bool update_eee = false;
10134 struct bnxt_link_info *link_info = &bp->link_info;
10135
10136 rc = bnxt_update_link(bp, true);
10137 if (rc) {
10138 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10139 rc);
10140 return rc;
10141 }
10142 if (!BNXT_SINGLE_PF(bp))
10143 return 0;
10144
10145 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10146 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10147 link_info->req_flow_ctrl)
10148 update_pause = true;
10149 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10150 link_info->force_pause_setting != link_info->req_flow_ctrl)
10151 update_pause = true;
10152 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10153 if (BNXT_AUTO_MODE(link_info->auto_mode))
10154 update_link = true;
10155 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10156 link_info->req_link_speed != link_info->force_link_speed)
10157 update_link = true;
10158 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10159 link_info->req_link_speed != link_info->force_pam4_link_speed)
10160 update_link = true;
10161 if (link_info->req_duplex != link_info->duplex_setting)
10162 update_link = true;
10163 } else {
10164 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10165 update_link = true;
10166 if (link_info->advertising != link_info->auto_link_speeds ||
10167 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10168 update_link = true;
10169 }
10170
10171 /* The last close may have shut down the link, so we need to call
10172 * PHY_CFG to bring it back up.
10173 */
10174 if (!bp->link_info.link_up)
10175 update_link = true;
10176
10177 if (!bnxt_eee_config_ok(bp))
10178 update_eee = true;
10179
10180 if (update_link)
10181 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10182 else if (update_pause)
10183 rc = bnxt_hwrm_set_pause(bp);
10184 if (rc) {
10185 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10186 rc);
10187 return rc;
10188 }
10189
10190 return rc;
10191 }
10192
10193 /* Common routine to pre-map certain register blocks to different GRC windows.
10194 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10195 * in the PF and 3 windows in the VF can be customized to map in different
10196 * register blocks.
10197 */
10198 static void bnxt_preset_reg_win(struct bnxt *bp)
10199 {
10200 if (BNXT_PF(bp)) {
10201 /* CAG registers map to GRC window #4 */
10202 writel(BNXT_CAG_REG_BASE,
10203 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10204 }
10205 }
10206
10207 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10208
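/* Retry firmware init and interrupt setup after an aborted firmware reset
 * so that the device can be opened again.
 */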
10209 static int bnxt_reinit_after_abort(struct bnxt *bp)
10210 {
10211 int rc;
10212
10213 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10214 return -EBUSY;
10215
10216 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10217 return -ENODEV;
10218
10219 rc = bnxt_fw_init_one(bp);
10220 if (!rc) {
10221 bnxt_clear_int_mode(bp);
10222 rc = bnxt_init_int_mode(bp);
10223 if (!rc) {
10224 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10225 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10226 }
10227 }
10228 return rc;
10229 }
10230
10231 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10232 {
10233 int rc = 0;
10234
10235 bnxt_preset_reg_win(bp);
10236 netif_carrier_off(bp->dev);
10237 if (irq_re_init) {
10238 /* Reserve rings now if none were reserved at driver probe. */
10239 rc = bnxt_init_dflt_ring_mode(bp);
10240 if (rc) {
10241 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10242 return rc;
10243 }
10244 }
10245 rc = bnxt_reserve_rings(bp, irq_re_init);
10246 if (rc)
10247 return rc;
10248 if ((bp->flags & BNXT_FLAG_RFS) &&
10249 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10250 /* disable RFS if falling back to INTA */
10251 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10252 bp->flags &= ~BNXT_FLAG_RFS;
10253 }
10254
10255 rc = bnxt_alloc_mem(bp, irq_re_init);
10256 if (rc) {
10257 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10258 goto open_err_free_mem;
10259 }
10260
10261 if (irq_re_init) {
10262 bnxt_init_napi(bp);
10263 rc = bnxt_request_irq(bp);
10264 if (rc) {
10265 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10266 goto open_err_irq;
10267 }
10268 }
10269
10270 rc = bnxt_init_nic(bp, irq_re_init);
10271 if (rc) {
10272 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10273 goto open_err_irq;
10274 }
10275
10276 bnxt_enable_napi(bp);
10277 bnxt_debug_dev_init(bp);
10278
10279 if (link_re_init) {
10280 mutex_lock(&bp->link_lock);
10281 rc = bnxt_update_phy_setting(bp);
10282 mutex_unlock(&bp->link_lock);
10283 if (rc) {
10284 netdev_warn(bp->dev, "failed to update phy settings\n");
10285 if (BNXT_SINGLE_PF(bp)) {
10286 bp->link_info.phy_retry = true;
10287 bp->link_info.phy_retry_expires =
10288 jiffies + 5 * HZ;
10289 }
10290 }
10291 }
10292
10293 if (irq_re_init)
10294 udp_tunnel_nic_reset_ntf(bp->dev);
10295
10296 set_bit(BNXT_STATE_OPEN, &bp->state);
10297 bnxt_enable_int(bp);
10298 /* Enable TX queues */
10299 bnxt_tx_enable(bp);
10300 mod_timer(&bp->timer, jiffies + bp->current_interval);
10301 /* Poll link status and check for SFP+ module status */
10302 bnxt_get_port_module_status(bp);
10303
10304 /* VF-reps may need to be re-opened after the PF is re-opened */
10305 if (BNXT_PF(bp))
10306 bnxt_vf_reps_open(bp);
10307 return 0;
10308
10309 open_err_irq:
10310 bnxt_del_napi(bp);
10311
10312 open_err_free_mem:
10313 bnxt_free_skbs(bp);
10314 bnxt_free_irq(bp);
10315 bnxt_free_mem(bp, true);
10316 return rc;
10317 }
10318
10319 /* rtnl_lock held */
10320 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10321 {
10322 int rc = 0;
10323
10324 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10325 rc = -EIO;
10326 if (!rc)
10327 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10328 if (rc) {
10329 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10330 dev_close(bp->dev);
10331 }
10332 return rc;
10333 }
10334
10335 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10336 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
10337 * self-tests.
10338 */
10339 int bnxt_half_open_nic(struct bnxt *bp)
10340 {
10341 int rc = 0;
10342
10343 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10344 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10345 rc = -ENODEV;
10346 goto half_open_err;
10347 }
10348
10349 rc = bnxt_alloc_mem(bp, false);
10350 if (rc) {
10351 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10352 goto half_open_err;
10353 }
10354 rc = bnxt_init_nic(bp, false);
10355 if (rc) {
10356 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10357 goto half_open_err;
10358 }
10359 return 0;
10360
10361 half_open_err:
10362 bnxt_free_skbs(bp);
10363 bnxt_free_mem(bp, false);
10364 dev_close(bp->dev);
10365 return rc;
10366 }
10367
10368 /* rtnl_lock held, this call can only be made after a previous successful
10369 * call to bnxt_half_open_nic().
10370 */
10371 void bnxt_half_close_nic(struct bnxt *bp)
10372 {
10373 bnxt_hwrm_resource_free(bp, false, false);
10374 bnxt_free_skbs(bp);
10375 bnxt_free_mem(bp, false);
10376 }
10377
10378 static void bnxt_reenable_sriov(struct bnxt *bp)
10379 {
10380 if (BNXT_PF(bp)) {
10381 struct bnxt_pf_info *pf = &bp->pf;
10382 int n = pf->active_vfs;
10383
10384 if (n)
10385 bnxt_cfg_hw_sriov(bp, &n, true);
10386 }
10387 }
10388
10389 static int bnxt_open(struct net_device *dev)
10390 {
10391 struct bnxt *bp = netdev_priv(dev);
10392 int rc;
10393
10394 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10395 rc = bnxt_reinit_after_abort(bp);
10396 if (rc) {
10397 if (rc == -EBUSY)
10398 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10399 else
10400 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10401 return -ENODEV;
10402 }
10403 }
10404
10405 rc = bnxt_hwrm_if_change(bp, true);
10406 if (rc)
10407 return rc;
10408
10409 rc = __bnxt_open_nic(bp, true, true);
10410 if (rc) {
10411 bnxt_hwrm_if_change(bp, false);
10412 } else {
10413 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10414 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10415 bnxt_ulp_start(bp, 0);
10416 bnxt_reenable_sriov(bp);
10417 }
10418 }
10419 bnxt_hwmon_open(bp);
10420 }
10421
10422 return rc;
10423 }
10424
10425 static bool bnxt_drv_busy(struct bnxt *bp)
10426 {
10427 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10428 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10429 }
10430
10431 static void bnxt_get_ring_stats(struct bnxt *bp,
10432 struct rtnl_link_stats64 *stats);
10433
10434 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10435 bool link_re_init)
10436 {
10437 /* Close the VF-reps before closing PF */
10438 if (BNXT_PF(bp))
10439 bnxt_vf_reps_close(bp);
10440
10441 /* Change device state to avoid TX queue wake-ups */
10442 bnxt_tx_disable(bp);
10443
10444 clear_bit(BNXT_STATE_OPEN, &bp->state);
10445 smp_mb__after_atomic();
10446 while (bnxt_drv_busy(bp))
10447 msleep(20);
10448
10449 /* Flush rings and disable interrupts */
10450 bnxt_shutdown_nic(bp, irq_re_init);
10451
10452 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10453
10454 bnxt_debug_dev_exit(bp);
10455 bnxt_disable_napi(bp);
10456 del_timer_sync(&bp->timer);
10457 bnxt_free_skbs(bp);
10458
10459 /* Save ring stats before shutdown */
10460 if (bp->bnapi && irq_re_init)
10461 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10462 if (irq_re_init) {
10463 bnxt_free_irq(bp);
10464 bnxt_del_napi(bp);
10465 }
10466 bnxt_free_mem(bp, irq_re_init);
10467 }
10468
10469 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10470 {
10471 int rc = 0;
10472
10473 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10474 /* If we get here, it means firmware reset is in progress
10475 * while we are trying to close. We can safely proceed with
10476 * the close because we are holding rtnl_lock(). Some firmware
10477 * messages may fail as we proceed to close. We set the
10478 * ABORT_ERR flag here so that the FW reset thread will later
10479 * abort when it gets the rtnl_lock() and sees the flag.
10480 */
10481 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10482 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10483 }
10484
10485 #ifdef CONFIG_BNXT_SRIOV
10486 if (bp->sriov_cfg) {
10487 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10488 !bp->sriov_cfg,
10489 BNXT_SRIOV_CFG_WAIT_TMO);
10490 if (rc)
10491 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10492 }
10493 #endif
10494 __bnxt_close_nic(bp, irq_re_init, link_re_init);
10495 return rc;
10496 }
10497
10498 static int bnxt_close(struct net_device *dev)
10499 {
10500 struct bnxt *bp = netdev_priv(dev);
10501
10502 bnxt_hwmon_close(bp);
10503 bnxt_close_nic(bp, true, true);
10504 bnxt_hwrm_shutdown_link(bp);
10505 bnxt_hwrm_if_change(bp, false);
10506 return 0;
10507 }
10508
10509 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10510 u16 *val)
10511 {
10512 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
10513 struct hwrm_port_phy_mdio_read_input req = {0};
10514 int rc;
10515
10516 if (bp->hwrm_spec_code < 0x10a00)
10517 return -EOPNOTSUPP;
10518
10519 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
10520 req.port_id = cpu_to_le16(bp->pf.port_id);
10521 req.phy_addr = phy_addr;
10522 req.reg_addr = cpu_to_le16(reg & 0x1f);
10523 if (mdio_phy_id_is_c45(phy_addr)) {
10524 req.cl45_mdio = 1;
10525 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10526 req.dev_addr = mdio_phy_id_devad(phy_addr);
10527 req.reg_addr = cpu_to_le16(reg);
10528 }
10529
10530 mutex_lock(&bp->hwrm_cmd_lock);
10531 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10532 if (!rc)
10533 *val = le16_to_cpu(resp->reg_data);
10534 mutex_unlock(&bp->hwrm_cmd_lock);
10535 return rc;
10536 }
10537
10538 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10539 u16 val)
10540 {
10541 struct hwrm_port_phy_mdio_write_input req = {0};
10542
10543 if (bp->hwrm_spec_code < 0x10a00)
10544 return -EOPNOTSUPP;
10545
10546 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
10547 req.port_id = cpu_to_le16(bp->pf.port_id);
10548 req.phy_addr = phy_addr;
10549 req.reg_addr = cpu_to_le16(reg & 0x1f);
10550 if (mdio_phy_id_is_c45(phy_addr)) {
10551 req.cl45_mdio = 1;
10552 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10553 req.dev_addr = mdio_phy_id_devad(phy_addr);
10554 req.reg_addr = cpu_to_le16(reg);
10555 }
10556 req.reg_data = cpu_to_le16(val);
10557
10558 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10559 }
10560
10561 /* rtnl_lock held */
10562 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10563 {
10564 struct mii_ioctl_data *mdio = if_mii(ifr);
10565 struct bnxt *bp = netdev_priv(dev);
10566 int rc;
10567
10568 switch (cmd) {
10569 case SIOCGMIIPHY:
10570 mdio->phy_id = bp->link_info.phy_addr;
10571
10572 fallthrough;
10573 case SIOCGMIIREG: {
10574 u16 mii_regval = 0;
10575
10576 if (!netif_running(dev))
10577 return -EAGAIN;
10578
10579 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10580 &mii_regval);
10581 mdio->val_out = mii_regval;
10582 return rc;
10583 }
10584
10585 case SIOCSMIIREG:
10586 if (!netif_running(dev))
10587 return -EAGAIN;
10588
10589 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10590 mdio->val_in);
10591
10592 case SIOCSHWTSTAMP:
10593 return bnxt_hwtstamp_set(dev, ifr);
10594
10595 case SIOCGHWTSTAMP:
10596 return bnxt_hwtstamp_get(dev, ifr);
10597
10598 default:
10599 /* do nothing */
10600 break;
10601 }
10602 return -EOPNOTSUPP;
10603 }
10604
10605 static void bnxt_get_ring_stats(struct bnxt *bp,
10606 struct rtnl_link_stats64 *stats)
10607 {
10608 int i;
10609
10610 for (i = 0; i < bp->cp_nr_rings; i++) {
10611 struct bnxt_napi *bnapi = bp->bnapi[i];
10612 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10613 u64 *sw = cpr->stats.sw_stats;
10614
10615 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10616 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10617 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10618
10619 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10620 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10621 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10622
10623 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10624 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10625 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10626
10627 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10628 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10629 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10630
10631 stats->rx_missed_errors +=
10632 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10633
10634 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10635
10636 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10637
10638 stats->rx_dropped +=
10639 cpr->sw_stats.rx.rx_netpoll_discards +
10640 cpr->sw_stats.rx.rx_oom_discards;
10641 }
10642 }
10643
10644 static void bnxt_add_prev_stats(struct bnxt *bp,
10645 struct rtnl_link_stats64 *stats)
10646 {
10647 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10648
10649 stats->rx_packets += prev_stats->rx_packets;
10650 stats->tx_packets += prev_stats->tx_packets;
10651 stats->rx_bytes += prev_stats->rx_bytes;
10652 stats->tx_bytes += prev_stats->tx_bytes;
10653 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10654 stats->multicast += prev_stats->multicast;
10655 stats->rx_dropped += prev_stats->rx_dropped;
10656 stats->tx_dropped += prev_stats->tx_dropped;
10657 }
10658
10659 static void
10660 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10661 {
10662 struct bnxt *bp = netdev_priv(dev);
10663
10664 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10665 /* Make sure bnxt_close_nic() sees that we are reading stats before
10666 * we check the BNXT_STATE_OPEN flag.
10667 */
10668 smp_mb__after_atomic();
10669 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10670 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10671 *stats = bp->net_stats_prev;
10672 return;
10673 }
10674
10675 bnxt_get_ring_stats(bp, stats);
10676 bnxt_add_prev_stats(bp, stats);
10677
10678 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10679 u64 *rx = bp->port_stats.sw_stats;
10680 u64 *tx = bp->port_stats.sw_stats +
10681 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10682
10683 stats->rx_crc_errors =
10684 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10685 stats->rx_frame_errors =
10686 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10687 stats->rx_length_errors =
10688 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10689 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10690 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10691 stats->rx_errors =
10692 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10693 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10694 stats->collisions =
10695 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10696 stats->tx_fifo_errors =
10697 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10698 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10699 }
10700 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10701 }
10702
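/* Sync the netdev multicast list into the default VNIC and return true if
 * it changed; fall back to ALL_MCAST when there are too many addresses.
 */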
10703 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10704 {
10705 struct net_device *dev = bp->dev;
10706 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10707 struct netdev_hw_addr *ha;
10708 u8 *haddr;
10709 int mc_count = 0;
10710 bool update = false;
10711 int off = 0;
10712
10713 netdev_for_each_mc_addr(ha, dev) {
10714 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10715 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10716 vnic->mc_list_count = 0;
10717 return false;
10718 }
10719 haddr = ha->addr;
10720 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10721 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10722 update = true;
10723 }
10724 off += ETH_ALEN;
10725 mc_count++;
10726 }
10727 if (mc_count)
10728 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10729
10730 if (mc_count != vnic->mc_list_count) {
10731 vnic->mc_list_count = mc_count;
10732 update = true;
10733 }
10734 return update;
10735 }
10736
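/* Return true if the netdev unicast address list no longer matches the
 * filters cached in the default VNIC.
 */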
10737 static bool bnxt_uc_list_updated(struct bnxt *bp)
10738 {
10739 struct net_device *dev = bp->dev;
10740 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10741 struct netdev_hw_addr *ha;
10742 int off = 0;
10743
10744 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10745 return true;
10746
10747 netdev_for_each_uc_addr(ha, dev) {
10748 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10749 return true;
10750
10751 off += ETH_ALEN;
10752 }
10753 return false;
10754 }
10755
10756 static void bnxt_set_rx_mode(struct net_device *dev)
10757 {
10758 struct bnxt *bp = netdev_priv(dev);
10759 struct bnxt_vnic_info *vnic;
10760 bool mc_update = false;
10761 bool uc_update;
10762 u32 mask;
10763
10764 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10765 return;
10766
10767 vnic = &bp->vnic_info[0];
10768 mask = vnic->rx_mask;
10769 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10770 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10771 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10772 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10773
10774 if (dev->flags & IFF_PROMISC)
10775 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10776
10777 uc_update = bnxt_uc_list_updated(bp);
10778
10779 if (dev->flags & IFF_BROADCAST)
10780 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10781 if (dev->flags & IFF_ALLMULTI) {
10782 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10783 vnic->mc_list_count = 0;
10784 } else {
10785 mc_update = bnxt_mc_list_updated(bp, &mask);
10786 }
10787
10788 if (mask != vnic->rx_mask || uc_update || mc_update) {
10789 vnic->rx_mask = mask;
10790
10791 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10792 bnxt_queue_sp_work(bp);
10793 }
10794 }
10795
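/* Program the RX mode into the hardware: rebuild the unicast L2 filters if
 * they changed, then apply the VNIC RX mask, falling back to promiscuous
 * mode when there are too many addresses and to ALL_MCAST when the
 * multicast filter setup fails.
 */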
10796 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10797 {
10798 struct net_device *dev = bp->dev;
10799 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10800 struct netdev_hw_addr *ha;
10801 int i, off = 0, rc;
10802 bool uc_update;
10803
10804 netif_addr_lock_bh(dev);
10805 uc_update = bnxt_uc_list_updated(bp);
10806 netif_addr_unlock_bh(dev);
10807
10808 if (!uc_update)
10809 goto skip_uc;
10810
10811 mutex_lock(&bp->hwrm_cmd_lock);
10812 for (i = 1; i < vnic->uc_filter_count; i++) {
10813 struct hwrm_cfa_l2_filter_free_input req = {0};
10814
10815 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10816 -1);
10817
10818 req.l2_filter_id = vnic->fw_l2_filter_id[i];
10819
10820 rc = _hwrm_send_message(bp, &req, sizeof(req),
10821 HWRM_CMD_TIMEOUT);
10822 }
10823 mutex_unlock(&bp->hwrm_cmd_lock);
10824
10825 vnic->uc_filter_count = 1;
10826
10827 netif_addr_lock_bh(dev);
10828 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10829 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10830 } else {
10831 netdev_for_each_uc_addr(ha, dev) {
10832 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10833 off += ETH_ALEN;
10834 vnic->uc_filter_count++;
10835 }
10836 }
10837 netif_addr_unlock_bh(dev);
10838
10839 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10840 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10841 if (rc) {
10842 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10843 rc);
10844 vnic->uc_filter_count = i;
10845 return rc;
10846 }
10847 }
10848
10849 skip_uc:
10850 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10851 !bnxt_promisc_ok(bp))
10852 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10853 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10854 if (rc && vnic->mc_list_count) {
10855 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10856 rc);
10857 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10858 vnic->mc_list_count = 0;
10859 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10860 }
10861 if (rc)
10862 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10863 rc);
10864
10865 return rc;
10866 }
10867
10868 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10869 {
10870 #ifdef CONFIG_BNXT_SRIOV
10871 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10872 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10873
10874 /* No minimum rings were provisioned by the PF. Don't
10875 * reserve rings by default when device is down.
10876 */
10877 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10878 return true;
10879
10880 if (!netif_running(bp->dev))
10881 return false;
10882 }
10883 #endif
10884 return true;
10885 }
10886
10887 /* If the chip and firmware support RFS */
10888 static bool bnxt_rfs_supported(struct bnxt *bp)
10889 {
10890 if (bp->flags & BNXT_FLAG_CHIP_P5) {
10891 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10892 return true;
10893 return false;
10894 }
10895 /* 212 firmware is broken for aRFS */
10896 if (BNXT_FW_MAJ(bp) == 212)
10897 return false;
10898 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10899 return true;
10900 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10901 return true;
10902 return false;
10903 }
10904
10905 /* If runtime conditions support RFS */
10906 static bool bnxt_rfs_capable(struct bnxt *bp)
10907 {
10908 #ifdef CONFIG_RFS_ACCEL
10909 int vnics, max_vnics, max_rss_ctxs;
10910
10911 if (bp->flags & BNXT_FLAG_CHIP_P5)
10912 return bnxt_rfs_supported(bp);
10913 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10914 return false;
10915
10916 vnics = 1 + bp->rx_nr_rings;
10917 max_vnics = bnxt_get_max_func_vnics(bp);
10918 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10919
10920 /* RSS contexts not a limiting factor */
10921 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10922 max_rss_ctxs = max_vnics;
10923 if (vnics > max_vnics || vnics > max_rss_ctxs) {
10924 if (bp->rx_nr_rings > 1)
10925 netdev_warn(bp->dev,
10926 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10927 min(max_rss_ctxs - 1, max_vnics - 1));
10928 return false;
10929 }
10930
10931 if (!BNXT_NEW_RM(bp))
10932 return true;
10933
10934 if (vnics == bp->hw_resc.resv_vnics)
10935 return true;
10936
10937 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10938 if (vnics <= bp->hw_resc.resv_vnics)
10939 return true;
10940
10941 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10942 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10943 return false;
10944 #else
10945 return false;
10946 #endif
10947 }
10948
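/* Resolve feature dependencies: drop NTUPLE when RFS is not usable, keep
 * LRO and hardware GRO mutually exclusive, and toggle CTAG/STAG RX VLAN
 * acceleration together.
 */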
10949 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10950 netdev_features_t features)
10951 {
10952 struct bnxt *bp = netdev_priv(dev);
10953 netdev_features_t vlan_features;
10954
10955 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10956 features &= ~NETIF_F_NTUPLE;
10957
10958 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10959 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10960
10961 if (!(features & NETIF_F_GRO))
10962 features &= ~NETIF_F_GRO_HW;
10963
10964 if (features & NETIF_F_GRO_HW)
10965 features &= ~NETIF_F_LRO;
10966
10967 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10968 * turned on or off together.
10969 */
10970 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10971 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10972 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10973 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10974 else if (vlan_features)
10975 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10976 }
10977 #ifdef CONFIG_BNXT_SRIOV
10978 if (BNXT_VF(bp) && bp->vf.vlan)
10979 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10980 #endif
10981 return features;
10982 }
10983
10984 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10985 {
10986 struct bnxt *bp = netdev_priv(dev);
10987 u32 flags = bp->flags;
10988 u32 changes;
10989 int rc = 0;
10990 bool re_init = false;
10991 bool update_tpa = false;
10992
10993 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10994 if (features & NETIF_F_GRO_HW)
10995 flags |= BNXT_FLAG_GRO;
10996 else if (features & NETIF_F_LRO)
10997 flags |= BNXT_FLAG_LRO;
10998
10999 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
11000 flags &= ~BNXT_FLAG_TPA;
11001
11002 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11003 flags |= BNXT_FLAG_STRIP_VLAN;
11004
11005 if (features & NETIF_F_NTUPLE)
11006 flags |= BNXT_FLAG_RFS;
11007
11008 changes = flags ^ bp->flags;
11009 if (changes & BNXT_FLAG_TPA) {
11010 update_tpa = true;
11011 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
11012 (flags & BNXT_FLAG_TPA) == 0 ||
11013 (bp->flags & BNXT_FLAG_CHIP_P5))
11014 re_init = true;
11015 }
11016
11017 if (changes & ~BNXT_FLAG_TPA)
11018 re_init = true;
11019
11020 if (flags != bp->flags) {
11021 u32 old_flags = bp->flags;
11022
11023 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11024 bp->flags = flags;
11025 if (update_tpa)
11026 bnxt_set_ring_params(bp);
11027 return rc;
11028 }
11029
11030 if (re_init) {
11031 bnxt_close_nic(bp, false, false);
11032 bp->flags = flags;
11033 if (update_tpa)
11034 bnxt_set_ring_params(bp);
11035
11036 return bnxt_open_nic(bp, false, false);
11037 }
11038 if (update_tpa) {
11039 bp->flags = flags;
11040 rc = bnxt_set_tpa(bp,
11041 (flags & BNXT_FLAG_TPA) ?
11042 true : false);
11043 if (rc)
11044 bp->flags = old_flags;
11045 }
11046 }
11047 return rc;
11048 }
11049
11050 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
11051 u8 **nextp)
11052 {
11053 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
11054 int hdr_count = 0;
11055 u8 *nexthdr;
11056 int start;
11057
11058 /* Check that there are at most 2 IPv6 extension headers, no
11059 * fragment header, and each is <= 64 bytes.
11060 */
11061 start = nw_off + sizeof(*ip6h);
11062 nexthdr = &ip6h->nexthdr;
11063 while (ipv6_ext_hdr(*nexthdr)) {
11064 struct ipv6_opt_hdr *hp;
11065 int hdrlen;
11066
11067 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
11068 *nexthdr == NEXTHDR_FRAGMENT)
11069 return false;
11070 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
11071 skb_headlen(skb), NULL);
11072 if (!hp)
11073 return false;
11074 if (*nexthdr == NEXTHDR_AUTH)
11075 hdrlen = ipv6_authlen(hp);
11076 else
11077 hdrlen = ipv6_optlen(hp);
11078
11079 if (hdrlen > 64)
11080 return false;
11081 nexthdr = &hp->nexthdr;
11082 start += hdrlen;
11083 hdr_count++;
11084 }
11085 if (nextp) {
11086 /* Caller will check inner protocol */
11087 if (skb->encapsulation) {
11088 *nextp = nexthdr;
11089 return true;
11090 }
11091 *nextp = NULL;
11092 }
11093 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11094 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11095 }
11096
11097 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
11098 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11099 {
11100 struct udphdr *uh = udp_hdr(skb);
11101 __be16 udp_port = uh->dest;
11102
11103 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11104 return false;
11105 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11106 struct ethhdr *eh = inner_eth_hdr(skb);
11107
11108 switch (eh->h_proto) {
11109 case htons(ETH_P_IP):
11110 return true;
11111 case htons(ETH_P_IPV6):
11112 return bnxt_exthdr_check(bp, skb,
11113 skb_inner_network_offset(skb),
11114 NULL);
11115 }
11116 }
11117 return false;
11118 }
11119
11120 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11121 {
11122 switch (l4_proto) {
11123 case IPPROTO_UDP:
11124 return bnxt_udp_tunl_check(bp, skb);
11125 case IPPROTO_IPIP:
11126 return true;
11127 case IPPROTO_GRE: {
11128 switch (skb->inner_protocol) {
11129 default:
11130 return false;
11131 case htons(ETH_P_IP):
11132 return true;
11133 case htons(ETH_P_IPV6):
11134 fallthrough;
11135 }
11136 }
11137 case IPPROTO_IPV6:
11138 /* Check ext headers of inner ipv6 */
11139 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11140 NULL);
11141 }
11142 return false;
11143 }
11144
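/* .ndo_features_check handler: keep checksum and GSO offloads only for
 * packets whose tunnel encapsulation and IPv6 extension headers pass the
 * checks above; otherwise clear NETIF_F_CSUM_MASK and NETIF_F_GSO_MASK so
 * the stack falls back to software.
 */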
11145 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11146 struct net_device *dev,
11147 netdev_features_t features)
11148 {
11149 struct bnxt *bp = netdev_priv(dev);
11150 u8 *l4_proto;
11151
11152 features = vlan_features_check(skb, features);
11153 switch (vlan_get_protocol(skb)) {
11154 case htons(ETH_P_IP):
11155 if (!skb->encapsulation)
11156 return features;
11157 l4_proto = &ip_hdr(skb)->protocol;
11158 if (bnxt_tunl_check(bp, skb, *l4_proto))
11159 return features;
11160 break;
11161 case htons(ETH_P_IPV6):
11162 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11163 &l4_proto))
11164 break;
11165 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11166 return features;
11167 break;
11168 }
11169 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11170 }
11171
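/* Read num_words 32-bit words starting at register offset reg_off using
 * the HWRM_DBG_READ_DIRECT firmware command.  The data is DMA'ed into a
 * temporary coherent buffer and copied to reg_buf in CPU byte order.
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */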
11172 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11173 u32 *reg_buf)
11174 {
11175 struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
11176 struct hwrm_dbg_read_direct_input req = {0};
11177 __le32 *dbg_reg_buf;
11178 dma_addr_t mapping;
11179 int rc, i;
11180
11181 dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
11182 &mapping, GFP_KERNEL);
11183 if (!dbg_reg_buf)
11184 return -ENOMEM;
11185 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
11186 req.host_dest_addr = cpu_to_le64(mapping);
11187 req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11188 req.read_len32 = cpu_to_le32(num_words);
11189 mutex_lock(&bp->hwrm_cmd_lock);
11190 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11191 if (rc || resp->error_code) {
11192 rc = -EIO;
11193 goto dbg_rd_reg_exit;
11194 }
11195 for (i = 0; i < num_words; i++)
11196 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11197
11198 dbg_rd_reg_exit:
11199 mutex_unlock(&bp->hwrm_cmd_lock);
11200 dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
11201 return rc;
11202 }
11203
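/* Query the firmware for the current producer and consumer indices of a
 * ring via HWRM_DBG_RING_INFO_GET.  For example, bnxt_chk_missed_irq()
 * below calls it with DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL to inspect a
 * completion ring that appears to be stuck.
 */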
11204 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11205 u32 ring_id, u32 *prod, u32 *cons)
11206 {
11207 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
11208 struct hwrm_dbg_ring_info_get_input req = {0};
11209 int rc;
11210
11211 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
11212 req.ring_type = ring_type;
11213 req.fw_ring_id = cpu_to_le32(ring_id);
11214 mutex_lock(&bp->hwrm_cmd_lock);
11215 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11216 if (!rc) {
11217 *prod = le32_to_cpu(resp->producer_index);
11218 *cons = le32_to_cpu(resp->consumer_index);
11219 }
11220 mutex_unlock(&bp->hwrm_cmd_lock);
11221 return rc;
11222 }
11223
11224 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11225 {
11226 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11227 int i = bnapi->index;
11228
11229 if (!txr)
11230 return;
11231
11232 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11233 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11234 txr->tx_cons);
11235 }
11236
11237 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11238 {
11239 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11240 int i = bnapi->index;
11241
11242 if (!rxr)
11243 return;
11244
11245 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11246 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11247 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11248 rxr->rx_sw_agg_prod);
11249 }
11250
11251 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11252 {
11253 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11254 int i = bnapi->index;
11255
11256 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11257 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11258 }
11259
11260 static void bnxt_dbg_dump_states(struct bnxt *bp)
11261 {
11262 int i;
11263 struct bnxt_napi *bnapi;
11264
11265 for (i = 0; i < bp->cp_nr_rings; i++) {
11266 bnapi = bp->bnapi[i];
11267 if (netif_msg_drv(bp)) {
11268 bnxt_dump_tx_sw_state(bnapi);
11269 bnxt_dump_rx_sw_state(bnapi);
11270 bnxt_dump_cp_sw_state(bnapi);
11271 }
11272 }
11273 }
11274
11275 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11276 {
11277 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11278 struct hwrm_ring_reset_input req = {0};
11279 struct bnxt_napi *bnapi = rxr->bnapi;
11280 struct bnxt_cp_ring_info *cpr;
11281 u16 cp_ring_id;
11282
11283 cpr = &bnapi->cp_ring;
11284 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11285 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
11286 req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11287 req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11288 return hwrm_send_message_silent(bp, &req, sizeof(req),
11289 HWRM_CMD_TIMEOUT);
11290 }
11291
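/* Re-initialize the NIC after an error.  A silent reset does a quiet
 * close/open without re-initializing IRQs; a full reset optionally dumps
 * the software ring state, stops the ULP driver, and closes/reopens the
 * NIC with IRQ re-initialization.
 */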
11292 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11293 {
11294 if (!silent)
11295 bnxt_dbg_dump_states(bp);
11296 if (netif_running(bp->dev)) {
11297 int rc;
11298
11299 if (silent) {
11300 bnxt_close_nic(bp, false, false);
11301 bnxt_open_nic(bp, false, false);
11302 } else {
11303 bnxt_ulp_stop(bp);
11304 bnxt_close_nic(bp, true, false);
11305 rc = bnxt_open_nic(bp, true, false);
11306 bnxt_ulp_start(bp, rc);
11307 }
11308 }
11309 }
11310
11311 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11312 {
11313 struct bnxt *bp = netdev_priv(dev);
11314
11315 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11316 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11317 bnxt_queue_sp_work(bp);
11318 }
11319
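/* Called from bnxt_timer() when error recovery is enabled.  Counts down
 * tmr_counter between checks, then samples the firmware heartbeat and
 * reset counters; if the heartbeat stops advancing or the reset count
 * changes, a BNXT_FW_EXCEPTION_SP_EVENT is scheduled to start recovery.
 */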
11320 static void bnxt_fw_health_check(struct bnxt *bp)
11321 {
11322 struct bnxt_fw_health *fw_health = bp->fw_health;
11323 u32 val;
11324
11325 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11326 return;
11327
11328 if (fw_health->tmr_counter) {
11329 fw_health->tmr_counter--;
11330 return;
11331 }
11332
11333 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11334 if (val == fw_health->last_fw_heartbeat)
11335 goto fw_reset;
11336
11337 fw_health->last_fw_heartbeat = val;
11338
11339 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11340 if (val != fw_health->last_fw_reset_cnt)
11341 goto fw_reset;
11342
11343 fw_health->tmr_counter = fw_health->tmr_multiplier;
11344 return;
11345
11346 fw_reset:
11347 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11348 bnxt_queue_sp_work(bp);
11349 }
11350
11351 static void bnxt_timer(struct timer_list *t)
11352 {
11353 struct bnxt *bp = from_timer(bp, t, timer);
11354 struct net_device *dev = bp->dev;
11355
11356 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11357 return;
11358
11359 if (atomic_read(&bp->intr_sem) != 0)
11360 goto bnxt_restart_timer;
11361
11362 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11363 bnxt_fw_health_check(bp);
11364
11365 if (bp->link_info.link_up && bp->stats_coal_ticks) {
11366 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11367 bnxt_queue_sp_work(bp);
11368 }
11369
11370 if (bnxt_tc_flower_enabled(bp)) {
11371 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11372 bnxt_queue_sp_work(bp);
11373 }
11374
11375 #ifdef CONFIG_RFS_ACCEL
11376 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11377 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11378 bnxt_queue_sp_work(bp);
11379 }
11380 #endif /*CONFIG_RFS_ACCEL*/
11381
11382 if (bp->link_info.phy_retry) {
11383 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11384 bp->link_info.phy_retry = false;
11385 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11386 } else {
11387 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11388 bnxt_queue_sp_work(bp);
11389 }
11390 }
11391
11392 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11393 netif_carrier_ok(dev)) {
11394 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11395 bnxt_queue_sp_work(bp);
11396 }
11397 bnxt_restart_timer:
11398 mod_timer(&bp->timer, jiffies + bp->current_interval);
11399 }
11400
11401 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11402 {
11403 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11404 * set. If the device is being closed, bnxt_close() may be holding
11405 * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11406 * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
11407 */
11408 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11409 rtnl_lock();
11410 }
11411
11412 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11413 {
11414 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11415 rtnl_unlock();
11416 }
11417
11418 /* Only called from bnxt_sp_task() */
11419 static void bnxt_reset(struct bnxt *bp, bool silent)
11420 {
11421 bnxt_rtnl_lock_sp(bp);
11422 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11423 bnxt_reset_task(bp, silent);
11424 bnxt_rtnl_unlock_sp(bp);
11425 }
11426
11427 /* Only called from bnxt_sp_task() */
11428 static void bnxt_rx_ring_reset(struct bnxt *bp)
11429 {
11430 int i;
11431
11432 bnxt_rtnl_lock_sp(bp);
11433 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11434 bnxt_rtnl_unlock_sp(bp);
11435 return;
11436 }
11437 /* Disable and flush TPA before resetting the RX ring */
11438 if (bp->flags & BNXT_FLAG_TPA)
11439 bnxt_set_tpa(bp, false);
11440 for (i = 0; i < bp->rx_nr_rings; i++) {
11441 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11442 struct bnxt_cp_ring_info *cpr;
11443 int rc;
11444
11445 if (!rxr->bnapi->in_reset)
11446 continue;
11447
11448 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11449 if (rc) {
11450 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11451 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11452 else
11453 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11454 rc);
11455 bnxt_reset_task(bp, true);
11456 break;
11457 }
11458 bnxt_free_one_rx_ring_skbs(bp, i);
11459 rxr->rx_prod = 0;
11460 rxr->rx_agg_prod = 0;
11461 rxr->rx_sw_agg_prod = 0;
11462 rxr->rx_next_cons = 0;
11463 rxr->bnapi->in_reset = false;
11464 bnxt_alloc_one_rx_ring(bp, i);
11465 cpr = &rxr->bnapi->cp_ring;
11466 cpr->sw_stats.rx.rx_resets++;
11467 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11468 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11469 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11470 }
11471 if (bp->flags & BNXT_FLAG_TPA)
11472 bnxt_set_tpa(bp, true);
11473 bnxt_rtnl_unlock_sp(bp);
11474 }
11475
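/* Tear the device down before a firmware reset.  The ULP driver is stopped
 * first; if the firmware is in a fatal state the datapath, NAPI and
 * interrupts are quiesced and the PCI device disabled before rings,
 * context memory and the firmware driver registration are released.
 */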
11476 static void bnxt_fw_reset_close(struct bnxt *bp)
11477 {
11478 bnxt_ulp_stop(bp);
11479 /* When firmware is in fatal state, quiesce device and disable
11480 * bus master to prevent any potential bad DMAs before freeing
11481 * kernel memory.
11482 */
11483 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11484 u16 val = 0;
11485
11486 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11487 if (val == 0xffff)
11488 bp->fw_reset_min_dsecs = 0;
11489 bnxt_tx_disable(bp);
11490 bnxt_disable_napi(bp);
11491 bnxt_disable_int_sync(bp);
11492 bnxt_free_irq(bp);
11493 bnxt_clear_int_mode(bp);
11494 pci_disable_device(bp->pdev);
11495 }
11496 __bnxt_close_nic(bp, true, false);
11497 bnxt_vf_reps_free(bp);
11498 bnxt_clear_int_mode(bp);
11499 bnxt_hwrm_func_drv_unrgtr(bp);
11500 if (pci_is_enabled(bp->pdev))
11501 pci_disable_device(bp->pdev);
11502 bnxt_free_ctx_mem(bp);
11503 kfree(bp->ctx);
11504 bp->ctx = NULL;
11505 }
11506
11507 static bool is_bnxt_fw_ok(struct bnxt *bp)
11508 {
11509 struct bnxt_fw_health *fw_health = bp->fw_health;
11510 bool no_heartbeat = false, has_reset = false;
11511 u32 val;
11512
11513 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11514 if (val == fw_health->last_fw_heartbeat)
11515 no_heartbeat = true;
11516
11517 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11518 if (val != fw_health->last_fw_reset_cnt)
11519 has_reset = true;
11520
11521 if (!no_heartbeat && has_reset)
11522 return true;
11523
11524 return false;
11525 }
11526
11527 /* rtnl_lock is acquired before calling this function */
11528 static void bnxt_force_fw_reset(struct bnxt *bp)
11529 {
11530 struct bnxt_fw_health *fw_health = bp->fw_health;
11531 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11532 u32 wait_dsecs;
11533
11534 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11535 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11536 return;
11537
11538 if (ptp) {
11539 spin_lock_bh(&ptp->ptp_lock);
11540 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11541 spin_unlock_bh(&ptp->ptp_lock);
11542 } else {
11543 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11544 }
11545 bnxt_fw_reset_close(bp);
11546 wait_dsecs = fw_health->master_func_wait_dsecs;
11547 if (fw_health->master) {
11548 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11549 wait_dsecs = 0;
11550 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11551 } else {
11552 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11553 wait_dsecs = fw_health->normal_func_wait_dsecs;
11554 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11555 }
11556
11557 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11558 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11559 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11560 }
11561
11562 void bnxt_fw_exception(struct bnxt *bp)
11563 {
11564 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11565 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11566 bnxt_rtnl_lock_sp(bp);
11567 bnxt_force_fw_reset(bp);
11568 bnxt_rtnl_unlock_sp(bp);
11569 }
11570
11571 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11572 * < 0 on error.
11573 */
11574 static int bnxt_get_registered_vfs(struct bnxt *bp)
11575 {
11576 #ifdef CONFIG_BNXT_SRIOV
11577 int rc;
11578
11579 if (!BNXT_PF(bp))
11580 return 0;
11581
11582 rc = bnxt_hwrm_func_qcfg(bp);
11583 if (rc) {
11584 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11585 return rc;
11586 }
11587 if (bp->pf.registered_vfs)
11588 return bp->pf.registered_vfs;
11589 if (bp->sriov_cfg)
11590 return 1;
11591 #endif
11592 return 0;
11593 }
11594
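/* Kick off the firmware reset sequence.  If VFs are still registered, the
 * state machine starts in BNXT_FW_RESET_STATE_POLL_VF and waits for them;
 * otherwise the NIC is closed here and bnxt_fw_reset_task() continues from
 * either POLL_FW_DOWN or ENABLE_DEV depending on firmware capabilities.
 */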
11595 void bnxt_fw_reset(struct bnxt *bp)
11596 {
11597 bnxt_rtnl_lock_sp(bp);
11598 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11599 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11600 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11601 int n = 0, tmo;
11602
11603 if (ptp) {
11604 spin_lock_bh(&ptp->ptp_lock);
11605 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11606 spin_unlock_bh(&ptp->ptp_lock);
11607 } else {
11608 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11609 }
11610 if (bp->pf.active_vfs &&
11611 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11612 n = bnxt_get_registered_vfs(bp);
11613 if (n < 0) {
11614 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11615 n);
11616 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11617 dev_close(bp->dev);
11618 goto fw_reset_exit;
11619 } else if (n > 0) {
11620 u16 vf_tmo_dsecs = n * 10;
11621
11622 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11623 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11624 bp->fw_reset_state =
11625 BNXT_FW_RESET_STATE_POLL_VF;
11626 bnxt_queue_fw_reset_work(bp, HZ / 10);
11627 goto fw_reset_exit;
11628 }
11629 bnxt_fw_reset_close(bp);
11630 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11631 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11632 tmo = HZ / 10;
11633 } else {
11634 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11635 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11636 }
11637 bnxt_queue_fw_reset_work(bp, tmo);
11638 }
11639 fw_reset_exit:
11640 bnxt_rtnl_unlock_sp(bp);
11641 }
11642
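/* P5 chips only: scan the completion rings for ones that have pending
 * work but whose consumer index has not moved since the last check.  Such
 * rings are queried with bnxt_dbg_hwrm_ring_info_get() and counted in the
 * missed_irqs statistic.
 */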
11643 static void bnxt_chk_missed_irq(struct bnxt *bp)
11644 {
11645 int i;
11646
11647 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11648 return;
11649
11650 for (i = 0; i < bp->cp_nr_rings; i++) {
11651 struct bnxt_napi *bnapi = bp->bnapi[i];
11652 struct bnxt_cp_ring_info *cpr;
11653 u32 fw_ring_id;
11654 int j;
11655
11656 if (!bnapi)
11657 continue;
11658
11659 cpr = &bnapi->cp_ring;
11660 for (j = 0; j < 2; j++) {
11661 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11662 u32 val[2];
11663
11664 if (!cpr2 || cpr2->has_more_work ||
11665 !bnxt_has_work(bp, cpr2))
11666 continue;
11667
11668 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11669 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11670 continue;
11671 }
11672 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11673 bnxt_dbg_hwrm_ring_info_get(bp,
11674 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11675 fw_ring_id, &val[0], &val[1]);
11676 cpr->sw_stats.cmn.missed_irqs++;
11677 }
11678 }
11679 }
11680
11681 static void bnxt_cfg_ntp_filters(struct bnxt *);
11682
11683 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11684 {
11685 struct bnxt_link_info *link_info = &bp->link_info;
11686
11687 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11688 link_info->autoneg = BNXT_AUTONEG_SPEED;
11689 if (bp->hwrm_spec_code >= 0x10201) {
11690 if (link_info->auto_pause_setting &
11691 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11692 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11693 } else {
11694 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11695 }
11696 link_info->advertising = link_info->auto_link_speeds;
11697 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11698 } else {
11699 link_info->req_link_speed = link_info->force_link_speed;
11700 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11701 if (link_info->force_pam4_link_speed) {
11702 link_info->req_link_speed =
11703 link_info->force_pam4_link_speed;
11704 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11705 }
11706 link_info->req_duplex = link_info->duplex_setting;
11707 }
11708 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11709 link_info->req_flow_ctrl =
11710 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11711 else
11712 link_info->req_flow_ctrl = link_info->force_pause_setting;
11713 }
11714
11715 static void bnxt_fw_echo_reply(struct bnxt *bp)
11716 {
11717 struct bnxt_fw_health *fw_health = bp->fw_health;
11718 struct hwrm_func_echo_response_input req = {0};
11719
11720 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
11721 req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11722 req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11723 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11724 }
11725
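/* Slow-path workqueue handler.  Runs with BNXT_STATE_IN_SP_TASK set and
 * services the sp_event bits queued from the timer, TX timeout and async
 * firmware event paths: RX mode and n-tuple filter updates, statistics,
 * link and PHY handling, echo replies, and, last, the reset and
 * firmware-exception events.
 */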
11726 static void bnxt_sp_task(struct work_struct *work)
11727 {
11728 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11729
11730 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11731 smp_mb__after_atomic();
11732 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11733 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11734 return;
11735 }
11736
11737 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11738 bnxt_cfg_rx_mode(bp);
11739
11740 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11741 bnxt_cfg_ntp_filters(bp);
11742 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11743 bnxt_hwrm_exec_fwd_req(bp);
11744 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11745 bnxt_hwrm_port_qstats(bp, 0);
11746 bnxt_hwrm_port_qstats_ext(bp, 0);
11747 bnxt_accumulate_all_stats(bp);
11748 }
11749
11750 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11751 int rc;
11752
11753 mutex_lock(&bp->link_lock);
11754 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11755 &bp->sp_event))
11756 bnxt_hwrm_phy_qcaps(bp);
11757
11758 rc = bnxt_update_link(bp, true);
11759 if (rc)
11760 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11761 rc);
11762
11763 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11764 &bp->sp_event))
11765 bnxt_init_ethtool_link_settings(bp);
11766 mutex_unlock(&bp->link_lock);
11767 }
11768 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11769 int rc;
11770
11771 mutex_lock(&bp->link_lock);
11772 rc = bnxt_update_phy_setting(bp);
11773 mutex_unlock(&bp->link_lock);
11774 if (rc) {
11775 netdev_warn(bp->dev, "update phy settings retry failed\n");
11776 } else {
11777 bp->link_info.phy_retry = false;
11778 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11779 }
11780 }
11781 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11782 mutex_lock(&bp->link_lock);
11783 bnxt_get_port_module_status(bp);
11784 mutex_unlock(&bp->link_lock);
11785 }
11786
11787 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11788 bnxt_tc_flow_stats_work(bp);
11789
11790 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11791 bnxt_chk_missed_irq(bp);
11792
11793 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11794 bnxt_fw_echo_reply(bp);
11795
11796 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
11797 * must be the last functions to be called before exiting.
11798 */
11799 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11800 bnxt_reset(bp, false);
11801
11802 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11803 bnxt_reset(bp, true);
11804
11805 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11806 bnxt_rx_ring_reset(bp);
11807
11808 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11809 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11810
11811 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11812 if (!is_bnxt_fw_ok(bp))
11813 bnxt_devlink_health_report(bp,
11814 BNXT_FW_EXCEPTION_SP_EVENT);
11815 }
11816
11817 smp_mb__before_atomic();
11818 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11819 }
11820
11821 /* Under rtnl_lock */
11822 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11823 int tx_xdp)
11824 {
11825 int max_rx, max_tx, tx_sets = 1;
11826 int tx_rings_needed, stats;
11827 int rx_rings = rx;
11828 int cp, vnics, rc;
11829
11830 if (tcs)
11831 tx_sets = tcs;
11832
11833 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11834 if (rc)
11835 return rc;
11836
11837 if (max_rx < rx)
11838 return -ENOMEM;
11839
11840 tx_rings_needed = tx * tx_sets + tx_xdp;
11841 if (max_tx < tx_rings_needed)
11842 return -ENOMEM;
11843
11844 vnics = 1;
11845 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11846 vnics += rx_rings;
11847
11848 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11849 rx_rings <<= 1;
11850 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11851 stats = cp;
11852 if (BNXT_NEW_RM(bp)) {
11853 cp += bnxt_get_ulp_msix_num(bp);
11854 stats += bnxt_get_ulp_stat_ctxs(bp);
11855 }
11856 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11857 stats, vnics);
11858 }
11859
11860 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11861 {
11862 if (bp->bar2) {
11863 pci_iounmap(pdev, bp->bar2);
11864 bp->bar2 = NULL;
11865 }
11866
11867 if (bp->bar1) {
11868 pci_iounmap(pdev, bp->bar1);
11869 bp->bar1 = NULL;
11870 }
11871
11872 if (bp->bar0) {
11873 pci_iounmap(pdev, bp->bar0);
11874 bp->bar0 = NULL;
11875 }
11876 }
11877
11878 static void bnxt_cleanup_pci(struct bnxt *bp)
11879 {
11880 bnxt_unmap_bars(bp, bp->pdev);
11881 pci_release_regions(bp->pdev);
11882 if (pci_is_enabled(bp->pdev))
11883 pci_disable_device(bp->pdev);
11884 }
11885
11886 static void bnxt_init_dflt_coal(struct bnxt *bp)
11887 {
11888 struct bnxt_coal *coal;
11889
11890 /* Tick values in microseconds.
11891 * 1 coal_buf x bufs_per_record = 1 completion record.
11892 */
11893 coal = &bp->rx_coal;
11894 coal->coal_ticks = 10;
11895 coal->coal_bufs = 30;
11896 coal->coal_ticks_irq = 1;
11897 coal->coal_bufs_irq = 2;
11898 coal->idle_thresh = 50;
11899 coal->bufs_per_record = 2;
11900 coal->budget = 64; /* NAPI budget */
11901
11902 coal = &bp->tx_coal;
11903 coal->coal_ticks = 28;
11904 coal->coal_bufs = 30;
11905 coal->coal_ticks_irq = 2;
11906 coal->coal_bufs_irq = 2;
11907 coal->bufs_per_record = 1;
11908
11909 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11910 }
11911
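/* Phase 1 of firmware initialization: query the firmware version
 * (retrying through bnxt_try_recover_fw() if the first attempt fails),
 * allocate the short command buffer when required, read the NVM config
 * version, reset the function and set the firmware time.
 */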
11912 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11913 {
11914 int rc;
11915
11916 bp->fw_cap = 0;
11917 rc = bnxt_hwrm_ver_get(bp);
11918 bnxt_try_map_fw_health_reg(bp);
11919 if (rc) {
11920 rc = bnxt_try_recover_fw(bp);
11921 if (rc)
11922 return rc;
11923 rc = bnxt_hwrm_ver_get(bp);
11924 if (rc)
11925 return rc;
11926 }
11927
11928 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
11929 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
11930 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
11931 if (rc)
11932 return rc;
11933 }
11934 bnxt_nvm_cfg_ver_get(bp);
11935
11936 rc = bnxt_hwrm_func_reset(bp);
11937 if (rc)
11938 return -ENODEV;
11939
11940 bnxt_hwrm_fw_set_time(bp);
11941 return 0;
11942 }
11943
11944 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11945 {
11946 int rc;
11947
11948 /* Get the MAX capabilities for this function */
11949 rc = bnxt_hwrm_func_qcaps(bp);
11950 if (rc) {
11951 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11952 rc);
11953 return -ENODEV;
11954 }
11955
11956 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11957 if (rc)
11958 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11959 rc);
11960
11961 if (bnxt_alloc_fw_health(bp)) {
11962 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11963 } else {
11964 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11965 if (rc)
11966 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11967 rc);
11968 }
11969
11970 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11971 if (rc)
11972 return -ENODEV;
11973
11974 bnxt_hwrm_func_qcfg(bp);
11975 bnxt_hwrm_vnic_qcaps(bp);
11976 bnxt_hwrm_port_led_qcaps(bp);
11977 bnxt_ethtool_init(bp);
11978 bnxt_dcb_init(bp);
11979 return 0;
11980 }
11981
11982 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11983 {
11984 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11985 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11986 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11987 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11988 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11989 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11990 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11991 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11992 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11993 }
11994 }
11995
11996 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11997 {
11998 struct net_device *dev = bp->dev;
11999
12000 dev->hw_features &= ~NETIF_F_NTUPLE;
12001 dev->features &= ~NETIF_F_NTUPLE;
12002 bp->flags &= ~BNXT_FLAG_RFS;
12003 if (bnxt_rfs_supported(bp)) {
12004 dev->hw_features |= NETIF_F_NTUPLE;
12005 if (bnxt_rfs_capable(bp)) {
12006 bp->flags |= BNXT_FLAG_RFS;
12007 dev->features |= NETIF_F_NTUPLE;
12008 }
12009 }
12010 }
12011
12012 static void bnxt_fw_init_one_p3(struct bnxt *bp)
12013 {
12014 struct pci_dev *pdev = bp->pdev;
12015
12016 bnxt_set_dflt_rss_hash_type(bp);
12017 bnxt_set_dflt_rfs(bp);
12018
12019 bnxt_get_wol_settings(bp);
12020 if (bp->flags & BNXT_FLAG_WOL_CAP)
12021 device_set_wakeup_enable(&pdev->dev, bp->wol);
12022 else
12023 device_set_wakeup_capable(&pdev->dev, false);
12024
12025 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
12026 bnxt_hwrm_coal_params_qcaps(bp);
12027 }
12028
12029 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
12030
12031 static int bnxt_fw_init_one(struct bnxt *bp)
12032 {
12033 int rc;
12034
12035 rc = bnxt_fw_init_one_p1(bp);
12036 if (rc) {
12037 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
12038 return rc;
12039 }
12040 rc = bnxt_fw_init_one_p2(bp);
12041 if (rc) {
12042 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12043 return rc;
12044 }
12045 rc = bnxt_probe_phy(bp, false);
12046 if (rc)
12047 return rc;
12048 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12049 if (rc)
12050 return rc;
12051
12052 /* In case fw capabilities have changed, destroy the unneeded
12053 * reporters and create newly capable ones.
12054 */
12055 bnxt_dl_fw_reporters_destroy(bp, false);
12056 bnxt_dl_fw_reporters_create(bp);
12057 bnxt_fw_init_one_p3(bp);
12058 return 0;
12059 }
12060
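/* Execute one step of the host-driven firmware reset sequence: write
 * fw_reset_seq_vals[reg_idx] to the register described by
 * fw_reset_seq_regs[reg_idx] (PCI config space, a GRC window, BAR0 or
 * BAR1), then delay for the step's configured number of milliseconds,
 * using a config read to flush the write first.
 */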
12061 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12062 {
12063 struct bnxt_fw_health *fw_health = bp->fw_health;
12064 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12065 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12066 u32 reg_type, reg_off, delay_msecs;
12067
12068 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12069 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12070 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12071 switch (reg_type) {
12072 case BNXT_FW_HEALTH_REG_TYPE_CFG:
12073 pci_write_config_dword(bp->pdev, reg_off, val);
12074 break;
12075 case BNXT_FW_HEALTH_REG_TYPE_GRC:
12076 writel(reg_off & BNXT_GRC_BASE_MASK,
12077 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12078 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
12079 fallthrough;
12080 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12081 writel(val, bp->bar0 + reg_off);
12082 break;
12083 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12084 writel(val, bp->bar1 + reg_off);
12085 break;
12086 }
12087 if (delay_msecs) {
12088 pci_read_config_dword(bp->pdev, 0, &val);
12089 msleep(delay_msecs);
12090 }
12091 }
12092
12093 static void bnxt_reset_all(struct bnxt *bp)
12094 {
12095 struct bnxt_fw_health *fw_health = bp->fw_health;
12096 int i, rc;
12097
12098 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12099 bnxt_fw_reset_via_optee(bp);
12100 bp->fw_reset_timestamp = jiffies;
12101 return;
12102 }
12103
12104 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12105 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12106 bnxt_fw_reset_writel(bp, i);
12107 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12108 struct hwrm_fw_reset_input req = {0};
12109
12110 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1,
12111 HWRM_TARGET_ID_KONG);
12112 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12113 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12114 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12115 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
12116 if (rc != -ENODEV)
12117 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12118 }
12119 bp->fw_reset_timestamp = jiffies;
12120 }
12121
12122 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12123 {
12124 return time_after(jiffies, bp->fw_reset_timestamp +
12125 (bp->fw_reset_max_dsecs * HZ / 10));
12126 }
12127
12128 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12129 {
12130 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12131 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12132 bnxt_ulp_start(bp, rc);
12133 bnxt_dl_health_status_update(bp, false);
12134 }
12135 bp->fw_reset_state = 0;
12136 dev_close(bp->dev);
12137 }
12138
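/* Delayed work that drives the firmware reset through the
 * BNXT_FW_RESET_STATE_* states (POLL_VF, POLL_FW_DOWN, RESET_FW,
 * ENABLE_DEV, POLL_FW, OPENING), re-queueing itself between states and
 * aborting the reset if a step times out.
 */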
12139 static void bnxt_fw_reset_task(struct work_struct *work)
12140 {
12141 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12142 int rc = 0;
12143
12144 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12145 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12146 return;
12147 }
12148
12149 switch (bp->fw_reset_state) {
12150 case BNXT_FW_RESET_STATE_POLL_VF: {
12151 int n = bnxt_get_registered_vfs(bp);
12152 int tmo;
12153
12154 if (n < 0) {
12155 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12156 n, jiffies_to_msecs(jiffies -
12157 bp->fw_reset_timestamp));
12158 goto fw_reset_abort;
12159 } else if (n > 0) {
12160 if (bnxt_fw_reset_timeout(bp)) {
12161 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12162 bp->fw_reset_state = 0;
12163 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12164 n);
12165 return;
12166 }
12167 bnxt_queue_fw_reset_work(bp, HZ / 10);
12168 return;
12169 }
12170 bp->fw_reset_timestamp = jiffies;
12171 rtnl_lock();
12172 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12173 bnxt_fw_reset_abort(bp, rc);
12174 rtnl_unlock();
12175 return;
12176 }
12177 bnxt_fw_reset_close(bp);
12178 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12179 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12180 tmo = HZ / 10;
12181 } else {
12182 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12183 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12184 }
12185 rtnl_unlock();
12186 bnxt_queue_fw_reset_work(bp, tmo);
12187 return;
12188 }
12189 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12190 u32 val;
12191
12192 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12193 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12194 !bnxt_fw_reset_timeout(bp)) {
12195 bnxt_queue_fw_reset_work(bp, HZ / 5);
12196 return;
12197 }
12198
12199 if (!bp->fw_health->master) {
12200 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12201
12202 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12203 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12204 return;
12205 }
12206 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12207 }
12208 fallthrough;
12209 case BNXT_FW_RESET_STATE_RESET_FW:
12210 bnxt_reset_all(bp);
12211 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12212 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12213 return;
12214 case BNXT_FW_RESET_STATE_ENABLE_DEV:
12215 bnxt_inv_fw_health_reg(bp);
12216 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12217 !bp->fw_reset_min_dsecs) {
12218 u16 val;
12219
12220 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12221 if (val == 0xffff) {
12222 if (bnxt_fw_reset_timeout(bp)) {
12223 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12224 rc = -ETIMEDOUT;
12225 goto fw_reset_abort;
12226 }
12227 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12228 return;
12229 }
12230 }
12231 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12232 if (pci_enable_device(bp->pdev)) {
12233 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12234 rc = -ENODEV;
12235 goto fw_reset_abort;
12236 }
12237 pci_set_master(bp->pdev);
12238 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12239 fallthrough;
12240 case BNXT_FW_RESET_STATE_POLL_FW:
12241 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12242 rc = bnxt_hwrm_poll(bp);
12243 if (rc) {
12244 if (bnxt_fw_reset_timeout(bp)) {
12245 netdev_err(bp->dev, "Firmware reset aborted\n");
12246 goto fw_reset_abort_status;
12247 }
12248 bnxt_queue_fw_reset_work(bp, HZ / 5);
12249 return;
12250 }
12251 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12252 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12253 fallthrough;
12254 case BNXT_FW_RESET_STATE_OPENING:
12255 while (!rtnl_trylock()) {
12256 bnxt_queue_fw_reset_work(bp, HZ / 10);
12257 return;
12258 }
12259 rc = bnxt_open(bp->dev);
12260 if (rc) {
12261 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12262 bnxt_fw_reset_abort(bp, rc);
12263 rtnl_unlock();
12264 return;
12265 }
12266
12267 bp->fw_reset_state = 0;
12268 /* Make sure fw_reset_state is 0 before clearing the flag */
12269 smp_mb__before_atomic();
12270 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12271 bnxt_ulp_start(bp, 0);
12272 bnxt_reenable_sriov(bp);
12273 bnxt_vf_reps_alloc(bp);
12274 bnxt_vf_reps_open(bp);
12275 bnxt_ptp_reapply_pps(bp);
12276 bnxt_dl_health_recovery_done(bp);
12277 bnxt_dl_health_status_update(bp, true);
12278 rtnl_unlock();
12279 break;
12280 }
12281 return;
12282
12283 fw_reset_abort_status:
12284 if (bp->fw_health->status_reliable ||
12285 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12286 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12287
12288 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12289 }
12290 fw_reset_abort:
12291 rtnl_lock();
12292 bnxt_fw_reset_abort(bp, rc);
12293 rtnl_unlock();
12294 }
12295
12296 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12297 {
12298 int rc;
12299 struct bnxt *bp = netdev_priv(dev);
12300
12301 SET_NETDEV_DEV(dev, &pdev->dev);
12302
12303 /* enable device (incl. PCI PM wakeup), and bus-mastering */
12304 rc = pci_enable_device(pdev);
12305 if (rc) {
12306 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12307 goto init_err;
12308 }
12309
12310 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12311 dev_err(&pdev->dev,
12312 "Cannot find PCI device base address, aborting\n");
12313 rc = -ENODEV;
12314 goto init_err_disable;
12315 }
12316
12317 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12318 if (rc) {
12319 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12320 goto init_err_disable;
12321 }
12322
12323 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12324 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12325 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12326 rc = -EIO;
12327 goto init_err_release;
12328 }
12329
12330 pci_set_master(pdev);
12331
12332 bp->dev = dev;
12333 bp->pdev = pdev;
12334
12335 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12336 * determines the BAR size.
12337 */
12338 bp->bar0 = pci_ioremap_bar(pdev, 0);
12339 if (!bp->bar0) {
12340 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12341 rc = -ENOMEM;
12342 goto init_err_release;
12343 }
12344
12345 bp->bar2 = pci_ioremap_bar(pdev, 4);
12346 if (!bp->bar2) {
12347 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12348 rc = -ENOMEM;
12349 goto init_err_release;
12350 }
12351
12352 pci_enable_pcie_error_reporting(pdev);
12353
12354 INIT_WORK(&bp->sp_task, bnxt_sp_task);
12355 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12356
12357 spin_lock_init(&bp->ntp_fltr_lock);
12358 #if BITS_PER_LONG == 32
12359 spin_lock_init(&bp->db_lock);
12360 #endif
12361
12362 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12363 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12364
12365 bnxt_init_dflt_coal(bp);
12366
12367 timer_setup(&bp->timer, bnxt_timer, 0);
12368 bp->current_interval = BNXT_TIMER_INTERVAL;
12369
12370 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12371 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12372
12373 clear_bit(BNXT_STATE_OPEN, &bp->state);
12374 return 0;
12375
12376 init_err_release:
12377 bnxt_unmap_bars(bp, pdev);
12378 pci_release_regions(pdev);
12379
12380 init_err_disable:
12381 pci_disable_device(pdev);
12382
12383 init_err:
12384 return rc;
12385 }
12386
12387 /* rtnl_lock held */
12388 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12389 {
12390 struct sockaddr *addr = p;
12391 struct bnxt *bp = netdev_priv(dev);
12392 int rc = 0;
12393
12394 if (!is_valid_ether_addr(addr->sa_data))
12395 return -EADDRNOTAVAIL;
12396
12397 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12398 return 0;
12399
12400 rc = bnxt_approve_mac(bp, addr->sa_data, true);
12401 if (rc)
12402 return rc;
12403
12404 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12405 if (netif_running(dev)) {
12406 bnxt_close_nic(bp, false, false);
12407 rc = bnxt_open_nic(bp, false, false);
12408 }
12409
12410 return rc;
12411 }
12412
12413 /* rtnl_lock held */
12414 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12415 {
12416 struct bnxt *bp = netdev_priv(dev);
12417
12418 if (netif_running(dev))
12419 bnxt_close_nic(bp, true, false);
12420
12421 dev->mtu = new_mtu;
12422 bnxt_set_ring_params(bp);
12423
12424 if (netif_running(dev))
12425 return bnxt_open_nic(bp, true, false);
12426
12427 return 0;
12428 }
12429
12430 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12431 {
12432 struct bnxt *bp = netdev_priv(dev);
12433 bool sh = false;
12434 int rc;
12435
12436 if (tc > bp->max_tc) {
12437 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12438 tc, bp->max_tc);
12439 return -EINVAL;
12440 }
12441
12442 if (netdev_get_num_tc(dev) == tc)
12443 return 0;
12444
12445 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12446 sh = true;
12447
12448 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12449 sh, tc, bp->tx_nr_rings_xdp);
12450 if (rc)
12451 return rc;
12452
12453 /* Need to close the device and redo hw resource allocations */
12454 if (netif_running(bp->dev))
12455 bnxt_close_nic(bp, true, false);
12456
12457 if (tc) {
12458 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12459 netdev_set_num_tc(dev, tc);
12460 } else {
12461 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12462 netdev_reset_tc(dev);
12463 }
12464 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12465 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12466 bp->tx_nr_rings + bp->rx_nr_rings;
12467
12468 if (netif_running(bp->dev))
12469 return bnxt_open_nic(bp, true, false);
12470
12471 return 0;
12472 }
12473
12474 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12475 void *cb_priv)
12476 {
12477 struct bnxt *bp = cb_priv;
12478
12479 if (!bnxt_tc_flower_enabled(bp) ||
12480 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12481 return -EOPNOTSUPP;
12482
12483 switch (type) {
12484 case TC_SETUP_CLSFLOWER:
12485 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12486 default:
12487 return -EOPNOTSUPP;
12488 }
12489 }
12490
12491 LIST_HEAD(bnxt_block_cb_list);
12492
12493 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12494 void *type_data)
12495 {
12496 struct bnxt *bp = netdev_priv(dev);
12497
12498 switch (type) {
12499 case TC_SETUP_BLOCK:
12500 return flow_block_cb_setup_simple(type_data,
12501 &bnxt_block_cb_list,
12502 bnxt_setup_tc_block_cb,
12503 bp, bp, true);
12504 case TC_SETUP_QDISC_MQPRIO: {
12505 struct tc_mqprio_qopt *mqprio = type_data;
12506
12507 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12508
12509 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12510 }
12511 default:
12512 return -EOPNOTSUPP;
12513 }
12514 }
12515
12516 #ifdef CONFIG_RFS_ACCEL
12517 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12518 struct bnxt_ntuple_filter *f2)
12519 {
12520 struct flow_keys *keys1 = &f1->fkeys;
12521 struct flow_keys *keys2 = &f2->fkeys;
12522
12523 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12524 keys1->basic.ip_proto != keys2->basic.ip_proto)
12525 return false;
12526
12527 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12528 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12529 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12530 return false;
12531 } else {
12532 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12533 sizeof(keys1->addrs.v6addrs.src)) ||
12534 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12535 sizeof(keys1->addrs.v6addrs.dst)))
12536 return false;
12537 }
12538
12539 if (keys1->ports.ports == keys2->ports.ports &&
12540 keys1->control.flags == keys2->control.flags &&
12541 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12542 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12543 return true;
12544
12545 return false;
12546 }
12547
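/* aRFS .ndo_rx_flow_steer handler.  Dissects the flow, looks for an
 * existing match in the ntp_fltr hash table, and otherwise allocates a new
 * n-tuple filter (bounded by BNXT_NTP_FLTR_MAX_FLTR) and queues
 * BNXT_RX_NTP_FLTR_SP_EVENT so bnxt_cfg_ntp_filters() programs it into the
 * firmware.  Returns the new filter's sw_id, 0 if an equivalent filter
 * already exists, or a negative error.
 */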
12548 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12549 u16 rxq_index, u32 flow_id)
12550 {
12551 struct bnxt *bp = netdev_priv(dev);
12552 struct bnxt_ntuple_filter *fltr, *new_fltr;
12553 struct flow_keys *fkeys;
12554 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12555 int rc = 0, idx, bit_id, l2_idx = 0;
12556 struct hlist_head *head;
12557 u32 flags;
12558
12559 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12560 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12561 int off = 0, j;
12562
12563 netif_addr_lock_bh(dev);
12564 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12565 if (ether_addr_equal(eth->h_dest,
12566 vnic->uc_list + off)) {
12567 l2_idx = j + 1;
12568 break;
12569 }
12570 }
12571 netif_addr_unlock_bh(dev);
12572 if (!l2_idx)
12573 return -EINVAL;
12574 }
12575 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12576 if (!new_fltr)
12577 return -ENOMEM;
12578
12579 fkeys = &new_fltr->fkeys;
12580 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12581 rc = -EPROTONOSUPPORT;
12582 goto err_free;
12583 }
12584
12585 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12586 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12587 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12588 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12589 rc = -EPROTONOSUPPORT;
12590 goto err_free;
12591 }
12592 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12593 bp->hwrm_spec_code < 0x10601) {
12594 rc = -EPROTONOSUPPORT;
12595 goto err_free;
12596 }
12597 flags = fkeys->control.flags;
12598 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12599 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12600 rc = -EPROTONOSUPPORT;
12601 goto err_free;
12602 }
12603
12604 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12605 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12606
12607 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12608 head = &bp->ntp_fltr_hash_tbl[idx];
12609 rcu_read_lock();
12610 hlist_for_each_entry_rcu(fltr, head, hash) {
12611 if (bnxt_fltr_match(fltr, new_fltr)) {
12612 rcu_read_unlock();
12613 rc = 0;
12614 goto err_free;
12615 }
12616 }
12617 rcu_read_unlock();
12618
12619 spin_lock_bh(&bp->ntp_fltr_lock);
12620 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12621 BNXT_NTP_FLTR_MAX_FLTR, 0);
12622 if (bit_id < 0) {
12623 spin_unlock_bh(&bp->ntp_fltr_lock);
12624 rc = -ENOMEM;
12625 goto err_free;
12626 }
12627
12628 new_fltr->sw_id = (u16)bit_id;
12629 new_fltr->flow_id = flow_id;
12630 new_fltr->l2_fltr_idx = l2_idx;
12631 new_fltr->rxq = rxq_index;
12632 hlist_add_head_rcu(&new_fltr->hash, head);
12633 bp->ntp_fltr_count++;
12634 spin_unlock_bh(&bp->ntp_fltr_lock);
12635
12636 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12637 bnxt_queue_sp_work(bp);
12638
12639 return new_fltr->sw_id;
12640
12641 err_free:
12642 kfree(new_fltr);
12643 return rc;
12644 }
12645
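/* Run from bnxt_sp_task(): walk the n-tuple filter hash table, program
 * filters that have not been installed yet into the firmware, and free
 * filters that RPS has expired or that failed to install.
 */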
12646 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12647 {
12648 int i;
12649
12650 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12651 struct hlist_head *head;
12652 struct hlist_node *tmp;
12653 struct bnxt_ntuple_filter *fltr;
12654 int rc;
12655
12656 head = &bp->ntp_fltr_hash_tbl[i];
12657 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12658 bool del = false;
12659
12660 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12661 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12662 fltr->flow_id,
12663 fltr->sw_id)) {
12664 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12665 fltr);
12666 del = true;
12667 }
12668 } else {
12669 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12670 fltr);
12671 if (rc)
12672 del = true;
12673 else
12674 set_bit(BNXT_FLTR_VALID, &fltr->state);
12675 }
12676
12677 if (del) {
12678 spin_lock_bh(&bp->ntp_fltr_lock);
12679 hlist_del_rcu(&fltr->hash);
12680 bp->ntp_fltr_count--;
12681 spin_unlock_bh(&bp->ntp_fltr_lock);
12682 synchronize_rcu();
12683 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12684 kfree(fltr);
12685 }
12686 }
12687 }
12688 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12689 netdev_info(bp->dev, "Receive PF driver unload event!\n");
12690 }
12691
12692 #else
12693
12694 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12695 {
12696 }
12697
12698 #endif /* CONFIG_RFS_ACCEL */
12699
12700 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12701 {
12702 struct bnxt *bp = netdev_priv(netdev);
12703 struct udp_tunnel_info ti;
12704 unsigned int cmd;
12705
12706 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12707 if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
12708 bp->vxlan_port = ti.port;
12709 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12710 } else {
12711 bp->nge_port = ti.port;
12712 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12713 }
12714
12715 if (ti.port)
12716 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12717
12718 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12719 }
12720
12721 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12722 .sync_table = bnxt_udp_tunnel_sync,
12723 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12724 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12725 .tables = {
12726 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
12727 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12728 },
12729 };
12730
12731 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12732 struct net_device *dev, u32 filter_mask,
12733 int nlflags)
12734 {
12735 struct bnxt *bp = netdev_priv(dev);
12736
12737 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12738 nlflags, filter_mask, NULL);
12739 }
12740
12741 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12742 u16 flags, struct netlink_ext_ack *extack)
12743 {
12744 struct bnxt *bp = netdev_priv(dev);
12745 struct nlattr *attr, *br_spec;
12746 int rem, rc = 0;
12747
12748 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12749 return -EOPNOTSUPP;
12750
12751 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12752 if (!br_spec)
12753 return -EINVAL;
12754
12755 nla_for_each_nested(attr, br_spec, rem) {
12756 u16 mode;
12757
12758 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12759 continue;
12760
12761 if (nla_len(attr) < sizeof(mode))
12762 return -EINVAL;
12763
12764 mode = nla_get_u16(attr);
12765 if (mode == bp->br_mode)
12766 break;
12767
12768 rc = bnxt_hwrm_set_br_mode(bp, mode);
12769 if (!rc)
12770 bp->br_mode = mode;
12771 break;
12772 }
12773 return rc;
12774 }
12775
12776 int bnxt_get_port_parent_id(struct net_device *dev,
12777 struct netdev_phys_item_id *ppid)
12778 {
12779 struct bnxt *bp = netdev_priv(dev);
12780
12781 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12782 return -EOPNOTSUPP;
12783
12784 /* The PF and its VF-reps only support the switchdev framework */
12785 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12786 return -EOPNOTSUPP;
12787
12788 ppid->id_len = sizeof(bp->dsn);
12789 memcpy(ppid->id, bp->dsn, ppid->id_len);
12790
12791 return 0;
12792 }
12793
12794 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12795 {
12796 struct bnxt *bp = netdev_priv(dev);
12797
12798 return &bp->dl_port;
12799 }
12800
12801 static const struct net_device_ops bnxt_netdev_ops = {
12802 .ndo_open = bnxt_open,
12803 .ndo_start_xmit = bnxt_start_xmit,
12804 .ndo_stop = bnxt_close,
12805 .ndo_get_stats64 = bnxt_get_stats64,
12806 .ndo_set_rx_mode = bnxt_set_rx_mode,
12807 .ndo_eth_ioctl = bnxt_ioctl,
12808 .ndo_validate_addr = eth_validate_addr,
12809 .ndo_set_mac_address = bnxt_change_mac_addr,
12810 .ndo_change_mtu = bnxt_change_mtu,
12811 .ndo_fix_features = bnxt_fix_features,
12812 .ndo_set_features = bnxt_set_features,
12813 .ndo_features_check = bnxt_features_check,
12814 .ndo_tx_timeout = bnxt_tx_timeout,
12815 #ifdef CONFIG_BNXT_SRIOV
12816 .ndo_get_vf_config = bnxt_get_vf_config,
12817 .ndo_set_vf_mac = bnxt_set_vf_mac,
12818 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
12819 .ndo_set_vf_rate = bnxt_set_vf_bw,
12820 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
12821 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
12822 .ndo_set_vf_trust = bnxt_set_vf_trust,
12823 #endif
12824 .ndo_setup_tc = bnxt_setup_tc,
12825 #ifdef CONFIG_RFS_ACCEL
12826 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
12827 #endif
12828 .ndo_bpf = bnxt_xdp,
12829 .ndo_xdp_xmit = bnxt_xdp_xmit,
12830 .ndo_bridge_getlink = bnxt_bridge_getlink,
12831 .ndo_bridge_setlink = bnxt_bridge_setlink,
12832 .ndo_get_devlink_port = bnxt_get_devlink_port,
12833 };
12834
12835 static void bnxt_remove_one(struct pci_dev *pdev)
12836 {
12837 struct net_device *dev = pci_get_drvdata(pdev);
12838 struct bnxt *bp = netdev_priv(dev);
12839
12840 if (BNXT_PF(bp))
12841 bnxt_sriov_disable(bp);
12842
12843 if (BNXT_PF(bp))
12844 devlink_port_type_clear(&bp->dl_port);
12845
12846 bnxt_ptp_clear(bp);
12847 pci_disable_pcie_error_reporting(pdev);
12848 unregister_netdev(dev);
12849 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12850 /* Flush any pending tasks */
12851 cancel_work_sync(&bp->sp_task);
12852 cancel_delayed_work_sync(&bp->fw_reset_task);
12853 bp->sp_event = 0;
12854
12855 bnxt_dl_fw_reporters_destroy(bp, true);
12856 bnxt_dl_unregister(bp);
12857 bnxt_shutdown_tc(bp);
12858
12859 bnxt_clear_int_mode(bp);
12860 bnxt_hwrm_func_drv_unrgtr(bp);
12861 bnxt_free_hwrm_resources(bp);
12862 bnxt_free_hwrm_short_cmd_req(bp);
12863 bnxt_ethtool_free(bp);
12864 bnxt_dcb_free(bp);
12865 kfree(bp->edev);
12866 bp->edev = NULL;
12867 kfree(bp->ptp_cfg);
12868 bp->ptp_cfg = NULL;
12869 kfree(bp->fw_health);
12870 bp->fw_health = NULL;
12871 bnxt_cleanup_pci(bp);
12872 bnxt_free_ctx_mem(bp);
12873 kfree(bp->ctx);
12874 bp->ctx = NULL;
12875 kfree(bp->rss_indir_tbl);
12876 bp->rss_indir_tbl = NULL;
12877 bnxt_free_port_stats(bp);
12878 free_netdev(dev);
12879 }
12880
12881 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12882 {
12883 int rc = 0;
12884 struct bnxt_link_info *link_info = &bp->link_info;
12885
12886 bp->phy_flags = 0;
12887 rc = bnxt_hwrm_phy_qcaps(bp);
12888 if (rc) {
12889 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12890 rc);
12891 return rc;
12892 }
12893 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12894 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12895 else
12896 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
12897 if (!fw_dflt)
12898 return 0;
12899
12900 rc = bnxt_update_link(bp, false);
12901 if (rc) {
12902 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12903 rc);
12904 return rc;
12905 }
12906
12907 /* Older firmware does not have supported_auto_speeds, so assume
12908 * that all supported speeds can be autonegotiated.
12909 */
12910 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12911 link_info->support_auto_speeds = link_info->support_speeds;
12912
12913 bnxt_init_ethtool_link_settings(bp);
12914 return 0;
12915 }
12916
12917 static int bnxt_get_max_irq(struct pci_dev *pdev)
12918 {
12919 u16 ctrl;
12920
12921 if (!pdev->msix_cap)
12922 return 1;
12923
12924 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12925 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12926 }
12927
12928 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12929 int *max_cp)
12930 {
12931 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12932 int max_ring_grps = 0, max_irq;
12933
12934 *max_tx = hw_resc->max_tx_rings;
12935 *max_rx = hw_resc->max_rx_rings;
12936 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12937 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12938 bnxt_get_ulp_msix_num(bp),
12939 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12940 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12941 *max_cp = min_t(int, *max_cp, max_irq);
12942 max_ring_grps = hw_resc->max_hw_ring_grps;
12943 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12944 *max_cp -= 1;
12945 *max_rx -= 2;
12946 }
12947 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12948 *max_rx >>= 1;
12949 if (bp->flags & BNXT_FLAG_CHIP_P5) {
12950 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12951 /* On P5 chips, max_cp output param should be available NQs */
12952 *max_cp = max_irq;
12953 }
12954 *max_rx = min_t(int, *max_rx, max_ring_grps);
12955 }
12956
12957 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12958 {
12959 int rx, tx, cp;
12960
12961 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12962 *max_rx = rx;
12963 *max_tx = tx;
12964 if (!rx || !tx || !cp)
12965 return -ENOMEM;
12966
12967 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12968 }
12969
12970 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12971 bool shared)
12972 {
12973 int rc;
12974
12975 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12976 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12977 /* Not enough rings, try disabling agg rings. */
12978 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12979 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12980 if (rc) {
12981 /* set BNXT_FLAG_AGG_RINGS back for consistency */
12982 bp->flags |= BNXT_FLAG_AGG_RINGS;
12983 return rc;
12984 }
12985 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12986 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12987 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12988 bnxt_set_ring_params(bp);
12989 }
12990
12991 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12992 int max_cp, max_stat, max_irq;
12993
12994 /* Reserve minimum resources for RoCE */
12995 max_cp = bnxt_get_max_func_cp_rings(bp);
12996 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12997 max_irq = bnxt_get_max_func_irqs(bp);
12998 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12999 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
13000 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
13001 return 0;
13002
13003 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
13004 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
13005 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
13006 max_cp = min_t(int, max_cp, max_irq);
13007 max_cp = min_t(int, max_cp, max_stat);
13008 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
13009 if (rc)
13010 rc = 0;
13011 }
13012 return rc;
13013 }
13014
13015 /* In the initial default shared ring setting, each shared ring must
13016 * have an RX/TX ring pair.
13017 */
13018 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
13019 {
13020 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
13021 bp->rx_nr_rings = bp->cp_nr_rings;
13022 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
13023 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13024 }
13025
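/* Pick the initial default ring counts: start from the default RSS
 * queue count (a single ring in a kdump kernel), cap it by the
 * per-port share of online CPUs and the hardware maximums, then
 * reserve the rings with firmware, trimming and re-reserving if fewer
 * resources are granted than requested.
 */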
13026 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
13027 {
13028 int dflt_rings, max_rx_rings, max_tx_rings, rc;
13029
13030 if (!bnxt_can_reserve_rings(bp))
13031 return 0;
13032
13033 if (sh)
13034 bp->flags |= BNXT_FLAG_SHARED_RINGS;
13035 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
13036 /* Reduce default rings on multi-port cards so that total default
13037 * rings do not exceed CPU count.
13038 */
13039 if (bp->port_count > 1) {
13040 int max_rings =
13041 max_t(int, num_online_cpus() / bp->port_count, 1);
13042
13043 dflt_rings = min_t(int, dflt_rings, max_rings);
13044 }
13045 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
13046 if (rc)
13047 return rc;
13048 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13049 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
13050 if (sh)
13051 bnxt_trim_dflt_sh_rings(bp);
13052 else
13053 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13054 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13055
13056 rc = __bnxt_reserve_rings(bp);
13057 if (rc)
13058 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
13059 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13060 if (sh)
13061 bnxt_trim_dflt_sh_rings(bp);
13062
13063 /* Rings may have been trimmed, re-reserve the trimmed rings. */
13064 if (bnxt_need_reserve_rings(bp)) {
13065 rc = __bnxt_reserve_rings(bp);
13066 if (rc)
13067 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13068 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13069 }
13070 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13071 bp->rx_nr_rings++;
13072 bp->cp_nr_rings++;
13073 }
13074 if (rc) {
13075 bp->tx_nr_rings = 0;
13076 bp->rx_nr_rings = 0;
13077 }
13078 return rc;
13079 }
13080
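/* Set up the default shared ring mode when no TX rings have been
 * configured yet, and enable RFS/NTUPLE if the resulting configuration
 * supports it.
 */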
13081 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13082 {
13083 int rc;
13084
13085 if (bp->tx_nr_rings)
13086 return 0;
13087
13088 bnxt_ulp_irq_stop(bp);
13089 bnxt_clear_int_mode(bp);
13090 rc = bnxt_set_dflt_rings(bp, true);
13091 if (rc) {
13092 netdev_err(bp->dev, "Not enough rings available.\n");
13093 goto init_dflt_ring_err;
13094 }
13095 rc = bnxt_init_int_mode(bp);
13096 if (rc)
13097 goto init_dflt_ring_err;
13098
13099 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13100 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
13101 bp->flags |= BNXT_FLAG_RFS;
13102 bp->dev->features |= NETIF_F_NTUPLE;
13103 }
13104 init_dflt_ring_err:
13105 bnxt_ulp_irq_restart(bp, rc);
13106 return rc;
13107 }
13108
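/* Re-query function capabilities and re-initialize the interrupt mode
 * after the firmware-managed resources may have changed, closing and
 * reopening the netdev around the re-initialization if it was running.
 */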
13109 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13110 {
13111 int rc;
13112
13113 ASSERT_RTNL();
13114 bnxt_hwrm_func_qcaps(bp);
13115
13116 if (netif_running(bp->dev))
13117 __bnxt_close_nic(bp, true, false);
13118
13119 bnxt_ulp_irq_stop(bp);
13120 bnxt_clear_int_mode(bp);
13121 rc = bnxt_init_int_mode(bp);
13122 bnxt_ulp_irq_restart(bp, rc);
13123
13124 if (netif_running(bp->dev)) {
13125 if (rc)
13126 dev_close(bp->dev);
13127 else
13128 rc = bnxt_open_nic(bp, true, false);
13129 }
13130
13131 return rc;
13132 }
13133
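/* Set the netdev MAC address: a PF uses the address assigned by
 * firmware, while a VF uses the administratively assigned MAC if valid
 * and otherwise a random address, which then has to be approved by the
 * PF/firmware.
 */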
13134 static int bnxt_init_mac_addr(struct bnxt *bp)
13135 {
13136 int rc = 0;
13137
13138 if (BNXT_PF(bp)) {
13139 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
13140 } else {
13141 #ifdef CONFIG_BNXT_SRIOV
13142 struct bnxt_vf_info *vf = &bp->vf;
13143 bool strict_approval = true;
13144
13145 if (is_valid_ether_addr(vf->mac_addr)) {
13146 /* overwrite netdev dev_addr with admin VF MAC */
13147 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
13148 /* Older PF driver or firmware may not approve this
13149 * correctly.
13150 */
13151 strict_approval = false;
13152 } else {
13153 eth_hw_addr_random(bp->dev);
13154 }
13155 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13156 #endif
13157 }
13158 return rc;
13159 }
13160
13161 #define BNXT_VPD_LEN 512
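/* Parse the read-only section of the PCI Vital Product Data and cache
 * the board part number and serial number in bp->board_partno and
 * bp->board_serialno.
 */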
13162 static void bnxt_vpd_read_info(struct bnxt *bp)
13163 {
13164 struct pci_dev *pdev = bp->pdev;
13165 int i, len, pos, ro_size, size;
13166 ssize_t vpd_size;
13167 u8 *vpd_data;
13168
13169 vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
13170 if (!vpd_data)
13171 return;
13172
13173 vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
13174 if (vpd_size <= 0) {
13175 netdev_err(bp->dev, "Unable to read VPD\n");
13176 goto exit;
13177 }
13178
13179 i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
13180 if (i < 0) {
13181 netdev_err(bp->dev, "VPD READ-Only not found\n");
13182 goto exit;
13183 }
13184
13191 ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
13192 i += PCI_VPD_LRDT_TAG_SIZE;
13193 if (i + ro_size > vpd_size)
13194 goto exit;
13195
13196 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13197 PCI_VPD_RO_KEYWORD_PARTNO);
13198 if (pos < 0)
13199 goto read_sn;
13200
13201 len = pci_vpd_info_field_size(&vpd_data[pos]);
13202 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13203 if (len + pos > vpd_size)
13204 goto read_sn;
13205
13206 size = min(len, BNXT_VPD_FLD_LEN - 1);
13207 memcpy(bp->board_partno, &vpd_data[pos], size);
13208
13209 read_sn:
13210 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13211 PCI_VPD_RO_KEYWORD_SERIALNO);
13212 if (pos < 0)
13213 goto exit;
13214
13215 len = pci_vpd_info_field_size(&vpd_data[pos]);
13216 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13217 if (len + pos > vpd_size)
13218 goto exit;
13219
13220 size = min(len, BNXT_VPD_FLD_LEN - 1);
13221 memcpy(bp->board_serialno, &vpd_data[pos], size);
13222 exit:
13223 kfree(vpd_data);
13224 }
13225
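/* Read the PCIe Device Serial Number capability; the DSN is stored in
 * little-endian byte order and used later as the eswitch switch_id.
 */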
13226 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13227 {
13228 struct pci_dev *pdev = bp->pdev;
13229 u64 qword;
13230
13231 qword = pci_get_dsn(pdev);
13232 if (!qword) {
13233 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13234 return -EOPNOTSUPP;
13235 }
13236
13237 put_unaligned_le64(qword, dsn);
13238
13239 bp->flags |= BNXT_FLAG_DSN_VALID;
13240 return 0;
13241 }
13242
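/* Map the doorbell BAR (BAR 2) using the size reported by firmware in
 * bp->db_size.
 */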
13243 static int bnxt_map_db_bar(struct bnxt *bp)
13244 {
13245 if (!bp->db_size)
13246 return -ENODEV;
13247 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13248 if (!bp->bar1)
13249 return -ENOMEM;
13250 return 0;
13251 }
13252
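/* PCI probe handler: allocate the netdev, bring up the HWRM channel,
 * query firmware and hardware capabilities, set up features, default
 * rings and interrupts, then register the netdev and devlink port.
 */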
13253 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13254 {
13255 struct net_device *dev;
13256 struct bnxt *bp;
13257 int rc, max_irqs;
13258
13259 if (pci_is_bridge(pdev))
13260 return -ENODEV;
13261
13262 /* Clear any DMA transactions left pending by the crashed kernel
13263 * before loading the driver in the capture (kdump) kernel.
13264 */
13265 if (is_kdump_kernel()) {
13266 pci_clear_master(pdev);
13267 pcie_flr(pdev);
13268 }
13269
13270 max_irqs = bnxt_get_max_irq(pdev);
13271 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13272 if (!dev)
13273 return -ENOMEM;
13274
13275 bp = netdev_priv(dev);
13276 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13277 bnxt_set_max_func_irqs(bp, max_irqs);
13278
13279 if (bnxt_vf_pciid(ent->driver_data))
13280 bp->flags |= BNXT_FLAG_VF;
13281
13282 if (pdev->msix_cap)
13283 bp->flags |= BNXT_FLAG_MSIX_CAP;
13284
13285 rc = bnxt_init_board(pdev, dev);
13286 if (rc < 0)
13287 goto init_err_free;
13288
13289 dev->netdev_ops = &bnxt_netdev_ops;
13290 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13291 dev->ethtool_ops = &bnxt_ethtool_ops;
13292 pci_set_drvdata(pdev, dev);
13293
13294 rc = bnxt_alloc_hwrm_resources(bp);
13295 if (rc)
13296 goto init_err_pci_clean;
13297
13298 mutex_init(&bp->hwrm_cmd_lock);
13299 mutex_init(&bp->link_lock);
13300
13301 rc = bnxt_fw_init_one_p1(bp);
13302 if (rc)
13303 goto init_err_pci_clean;
13304
13305 if (BNXT_PF(bp))
13306 bnxt_vpd_read_info(bp);
13307
13308 if (BNXT_CHIP_P5(bp)) {
13309 bp->flags |= BNXT_FLAG_CHIP_P5;
13310 if (BNXT_CHIP_SR2(bp))
13311 bp->flags |= BNXT_FLAG_CHIP_SR2;
13312 }
13313
13314 rc = bnxt_alloc_rss_indir_tbl(bp);
13315 if (rc)
13316 goto init_err_pci_clean;
13317
13318 rc = bnxt_fw_init_one_p2(bp);
13319 if (rc)
13320 goto init_err_pci_clean;
13321
13322 rc = bnxt_map_db_bar(bp);
13323 if (rc) {
13324 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13325 rc);
13326 goto init_err_pci_clean;
13327 }
13328
13329 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13330 NETIF_F_TSO | NETIF_F_TSO6 |
13331 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13332 NETIF_F_GSO_IPXIP4 |
13333 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13334 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13335 NETIF_F_RXCSUM | NETIF_F_GRO;
13336
13337 if (BNXT_SUPPORTS_TPA(bp))
13338 dev->hw_features |= NETIF_F_LRO;
13339
13340 dev->hw_enc_features =
13341 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13342 NETIF_F_TSO | NETIF_F_TSO6 |
13343 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13344 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13345 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13346 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13347
13348 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13349 NETIF_F_GSO_GRE_CSUM;
13350 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13351 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13352 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13353 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13354 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13355 if (BNXT_SUPPORTS_TPA(bp))
13356 dev->hw_features |= NETIF_F_GRO_HW;
13357 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13358 if (dev->features & NETIF_F_GRO_HW)
13359 dev->features &= ~NETIF_F_LRO;
13360 dev->priv_flags |= IFF_UNICAST_FLT;
13361
13362 #ifdef CONFIG_BNXT_SRIOV
13363 init_waitqueue_head(&bp->sriov_cfg_wait);
13364 mutex_init(&bp->sriov_lock);
13365 #endif
13366 if (BNXT_SUPPORTS_TPA(bp)) {
13367 bp->gro_func = bnxt_gro_func_5730x;
13368 if (BNXT_CHIP_P4(bp))
13369 bp->gro_func = bnxt_gro_func_5731x;
13370 else if (BNXT_CHIP_P5(bp))
13371 bp->gro_func = bnxt_gro_func_5750x;
13372 }
13373 if (!BNXT_CHIP_P4_PLUS(bp))
13374 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13375
13376 rc = bnxt_init_mac_addr(bp);
13377 if (rc) {
13378 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13379 rc = -EADDRNOTAVAIL;
13380 goto init_err_pci_clean;
13381 }
13382
13383 if (BNXT_PF(bp)) {
13384 /* Read the adapter's DSN to use as the eswitch switch_id */
13385 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13386 }
13387
13388 /* MTU range: 60 - FW defined max */
13389 dev->min_mtu = ETH_ZLEN;
13390 dev->max_mtu = bp->max_mtu;
13391
13392 rc = bnxt_probe_phy(bp, true);
13393 if (rc)
13394 goto init_err_pci_clean;
13395
13396 bnxt_set_rx_skb_mode(bp, false);
13397 bnxt_set_tpa_flags(bp);
13398 bnxt_set_ring_params(bp);
13399 rc = bnxt_set_dflt_rings(bp, true);
13400 if (rc) {
13401 netdev_err(bp->dev, "Not enough rings available.\n");
13402 rc = -ENOMEM;
13403 goto init_err_pci_clean;
13404 }
13405
13406 bnxt_fw_init_one_p3(bp);
13407
13408 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13409 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13410
13411 rc = bnxt_init_int_mode(bp);
13412 if (rc)
13413 goto init_err_pci_clean;
13414
13415 /* No TC has been set yet and rings may have been trimmed due to
13416 * limited MSIX, so we re-initialize the TX rings per TC.
13417 */
13418 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13419
13420 if (BNXT_PF(bp)) {
13421 if (!bnxt_pf_wq) {
13422 bnxt_pf_wq =
13423 create_singlethread_workqueue("bnxt_pf_wq");
13424 if (!bnxt_pf_wq) {
13425 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13426 rc = -ENOMEM;
13427 goto init_err_pci_clean;
13428 }
13429 }
13430 rc = bnxt_init_tc(bp);
13431 if (rc)
13432 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13433 rc);
13434 }
13435
13436 bnxt_inv_fw_health_reg(bp);
13437 bnxt_dl_register(bp);
13438
13439 rc = register_netdev(dev);
13440 if (rc)
13441 goto init_err_cleanup;
13442
13443 if (BNXT_PF(bp))
13444 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13445 bnxt_dl_fw_reporters_create(bp);
13446
13447 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13448 board_info[ent->driver_data].name,
13449 (long)pci_resource_start(pdev, 0), dev->dev_addr);
13450 pcie_print_link_status(pdev);
13451
13452 pci_save_state(pdev);
13453 return 0;
13454
13455 init_err_cleanup:
13456 bnxt_dl_unregister(bp);
13457 bnxt_shutdown_tc(bp);
13458 bnxt_clear_int_mode(bp);
13459
13460 init_err_pci_clean:
13461 bnxt_hwrm_func_drv_unrgtr(bp);
13462 bnxt_free_hwrm_short_cmd_req(bp);
13463 bnxt_free_hwrm_resources(bp);
13464 bnxt_ethtool_free(bp);
13465 bnxt_ptp_clear(bp);
13466 kfree(bp->ptp_cfg);
13467 bp->ptp_cfg = NULL;
13468 kfree(bp->fw_health);
13469 bp->fw_health = NULL;
13470 bnxt_cleanup_pci(bp);
13471 bnxt_free_ctx_mem(bp);
13472 kfree(bp->ctx);
13473 bp->ctx = NULL;
13474 kfree(bp->rss_indir_tbl);
13475 bp->rss_indir_tbl = NULL;
13476
13477 init_err_free:
13478 free_netdev(dev);
13479 return rc;
13480 }
13481
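/* PCI shutdown handler: close the netdev, quiesce ULPs and interrupts,
 * and arm wake-on-LAN (if configured) before the system powers off.
 */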
13482 static void bnxt_shutdown(struct pci_dev *pdev)
13483 {
13484 struct net_device *dev = pci_get_drvdata(pdev);
13485 struct bnxt *bp;
13486
13487 if (!dev)
13488 return;
13489
13490 rtnl_lock();
13491 bp = netdev_priv(dev);
13492 if (!bp)
13493 goto shutdown_exit;
13494
13495 if (netif_running(dev))
13496 dev_close(dev);
13497
13498 bnxt_ulp_shutdown(bp);
13499 bnxt_clear_int_mode(bp);
13500 pci_disable_device(pdev);
13501
13502 if (system_state == SYSTEM_POWER_OFF) {
13503 pci_wake_from_d3(pdev, bp->wol);
13504 pci_set_power_state(pdev, PCI_D3hot);
13505 }
13506
13507 shutdown_exit:
13508 rtnl_unlock();
13509 }
13510
13511 #ifdef CONFIG_PM_SLEEP
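/* System suspend: stop ULPs, close the netdev, unregister the driver
 * from firmware and free firmware-backed context memory ahead of the
 * resume-time re-initialization.
 */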
13512 static int bnxt_suspend(struct device *device)
13513 {
13514 struct net_device *dev = dev_get_drvdata(device);
13515 struct bnxt *bp = netdev_priv(dev);
13516 int rc = 0;
13517
13518 rtnl_lock();
13519 bnxt_ulp_stop(bp);
13520 if (netif_running(dev)) {
13521 netif_device_detach(dev);
13522 rc = bnxt_close(dev);
13523 }
13524 bnxt_hwrm_func_drv_unrgtr(bp);
13525 pci_disable_device(bp->pdev);
13526 bnxt_free_ctx_mem(bp);
13527 kfree(bp->ctx);
13528 bp->ctx = NULL;
13529 rtnl_unlock();
13530 return rc;
13531 }
13532
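/* System resume: re-enable the PCI device, re-establish the firmware
 * channel (VER_GET, function reset, capability query, driver
 * registration) and reopen the netdev if it was running at suspend.
 */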
13533 static int bnxt_resume(struct device *device)
13534 {
13535 struct net_device *dev = dev_get_drvdata(device);
13536 struct bnxt *bp = netdev_priv(dev);
13537 int rc = 0;
13538
13539 rtnl_lock();
13540 rc = pci_enable_device(bp->pdev);
13541 if (rc) {
13542 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13543 rc);
13544 goto resume_exit;
13545 }
13546 pci_set_master(bp->pdev);
13547 if (bnxt_hwrm_ver_get(bp)) {
13548 rc = -ENODEV;
13549 goto resume_exit;
13550 }
13551 rc = bnxt_hwrm_func_reset(bp);
13552 if (rc) {
13553 rc = -EBUSY;
13554 goto resume_exit;
13555 }
13556
13557 rc = bnxt_hwrm_func_qcaps(bp);
13558 if (rc)
13559 goto resume_exit;
13560
13561 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13562 rc = -ENODEV;
13563 goto resume_exit;
13564 }
13565
13566 bnxt_get_wol_settings(bp);
13567 if (netif_running(dev)) {
13568 rc = bnxt_open(dev);
13569 if (!rc)
13570 netif_device_attach(dev);
13571 }
13572
13573 resume_exit:
13574 bnxt_ulp_start(bp, rc);
13575 if (!rc)
13576 bnxt_reenable_sriov(bp);
13577 rtnl_unlock();
13578 return rc;
13579 }
13580
13581 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13582 #define BNXT_PM_OPS (&bnxt_pm_ops)
13583
13584 #else
13585
13586 #define BNXT_PM_OPS NULL
13587
13588 #endif /* CONFIG_PM_SLEEP */
13589
13590 /**
13591 * bnxt_io_error_detected - called when PCI error is detected
13592 * @pdev: Pointer to PCI device
13593 * @state: The current pci connection state
13594 *
13595 * This function is called after a PCI bus error affecting
13596 * this device has been detected.
13597 */
13598 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13599 pci_channel_state_t state)
13600 {
13601 struct net_device *netdev = pci_get_drvdata(pdev);
13602 struct bnxt *bp = netdev_priv(netdev);
13603
13604 netdev_info(netdev, "PCI I/O error detected\n");
13605
13606 rtnl_lock();
13607 netif_device_detach(netdev);
13608
13609 bnxt_ulp_stop(bp);
13610
13611 if (state == pci_channel_io_perm_failure) {
13612 rtnl_unlock();
13613 return PCI_ERS_RESULT_DISCONNECT;
13614 }
13615
13616 if (state == pci_channel_io_frozen)
13617 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13618
13619 if (netif_running(netdev))
13620 bnxt_close(netdev);
13621
13622 if (pci_is_enabled(pdev))
13623 pci_disable_device(pdev);
13624 bnxt_free_ctx_mem(bp);
13625 kfree(bp->ctx);
13626 bp->ctx = NULL;
13627 rtnl_unlock();
13628
13629 /* Request a slot reset. */
13630 return PCI_ERS_RESULT_NEED_RESET;
13631 }
13632
13633 /**
13634 * bnxt_io_slot_reset - called after the pci bus has been reset.
13635 * @pdev: Pointer to PCI device
13636 *
13637 * Restart the card from scratch, as if from a cold boot.
13638 * At this point, the card has experienced a hard reset,
13639 * followed by fixups by the BIOS, and has its config space
13640 * set up identically to what it was at cold boot.
13641 */
13642 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13643 {
13644 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13645 struct net_device *netdev = pci_get_drvdata(pdev);
13646 struct bnxt *bp = netdev_priv(netdev);
13647 int err = 0, off;
13648
13649 netdev_info(bp->dev, "PCI Slot Reset\n");
13650
13651 rtnl_lock();
13652
13653 if (pci_enable_device(pdev)) {
13654 dev_err(&pdev->dev,
13655 "Cannot re-enable PCI device after reset.\n");
13656 } else {
13657 pci_set_master(pdev);
13658 /* Upon a fatal error, the device's internal logic that latches the
13659 * BAR values is reset and is restored only when the BARs are
13660 * rewritten.
13661 *
13662 * pci_restore_state() does not rewrite a BAR if its value matches
13663 * the previously saved value, so write the BARs to 0 first to
13664 * force a restore after a fatal error.
13665 */
13666 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13667 &bp->state)) {
13668 for (off = PCI_BASE_ADDRESS_0;
13669 off <= PCI_BASE_ADDRESS_5; off += 4)
13670 pci_write_config_dword(bp->pdev, off, 0);
13671 }
13672 pci_restore_state(pdev);
13673 pci_save_state(pdev);
13674
13675 err = bnxt_hwrm_func_reset(bp);
13676 if (!err)
13677 result = PCI_ERS_RESULT_RECOVERED;
13678 }
13679
13680 rtnl_unlock();
13681
13682 return result;
13683 }
13684
13685 /**
13686 * bnxt_io_resume - called when traffic can start flowing again.
13687 * @pdev: Pointer to PCI device
13688 *
13689 * This callback is called when the error recovery driver tells
13690 * us that it is OK to resume normal operation.
13691 */
13692 static void bnxt_io_resume(struct pci_dev *pdev)
13693 {
13694 struct net_device *netdev = pci_get_drvdata(pdev);
13695 struct bnxt *bp = netdev_priv(netdev);
13696 int err;
13697
13698 netdev_info(bp->dev, "PCI Slot Resume\n");
13699 rtnl_lock();
13700
13701 err = bnxt_hwrm_func_qcaps(bp);
13702 if (!err && netif_running(netdev))
13703 err = bnxt_open(netdev);
13704
13705 bnxt_ulp_start(bp, err);
13706 if (!err) {
13707 bnxt_reenable_sriov(bp);
13708 netif_device_attach(netdev);
13709 }
13710
13711 rtnl_unlock();
13712 }
13713
13714 static const struct pci_error_handlers bnxt_err_handler = {
13715 .error_detected = bnxt_io_error_detected,
13716 .slot_reset = bnxt_io_slot_reset,
13717 .resume = bnxt_io_resume
13718 };
13719
13720 static struct pci_driver bnxt_pci_driver = {
13721 .name = DRV_MODULE_NAME,
13722 .id_table = bnxt_pci_tbl,
13723 .probe = bnxt_init_one,
13724 .remove = bnxt_remove_one,
13725 .shutdown = bnxt_shutdown,
13726 .driver.pm = BNXT_PM_OPS,
13727 .err_handler = &bnxt_err_handler,
13728 #if defined(CONFIG_BNXT_SRIOV)
13729 .sriov_configure = bnxt_sriov_configure,
13730 #endif
13731 };
13732
13733 static int __init bnxt_init(void)
13734 {
13735 bnxt_debug_init();
13736 return pci_register_driver(&bnxt_pci_driver);
13737 }
13738
13739 static void __exit bnxt_exit(void)
13740 {
13741 pci_unregister_driver(&bnxt_pci_driver);
13742 if (bnxt_pf_wq)
13743 destroy_workqueue(bnxt_pf_wq);
13744 bnxt_debug_exit();
13745 }
13746
13747 module_init(bnxt_init);
13748 module_exit(bnxt_exit);