drivers/net/ethernet/broadcom/bnxt/bnxt.c
1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/udp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <net/udp_tunnel.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/ptp_clock_kernel.h>
53 #include <linux/timecounter.h>
54 #include <linux/cpu_rmap.h>
55 #include <linux/cpumask.h>
56 #include <net/pkt_cls.h>
57 #include <linux/hwmon.h>
58 #include <linux/hwmon-sysfs.h>
59 #include <net/page_pool.h>
60
61 #include "bnxt_hsi.h"
62 #include "bnxt.h"
63 #include "bnxt_ulp.h"
64 #include "bnxt_sriov.h"
65 #include "bnxt_ethtool.h"
66 #include "bnxt_dcb.h"
67 #include "bnxt_xdp.h"
68 #include "bnxt_ptp.h"
69 #include "bnxt_vfr.h"
70 #include "bnxt_tc.h"
71 #include "bnxt_devlink.h"
72 #include "bnxt_debugfs.h"
73
74 #define BNXT_TX_TIMEOUT (5 * HZ)
75 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW)
76
77 MODULE_LICENSE("GPL");
78 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
79
80 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
81 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
82 #define BNXT_RX_COPY_THRESH 256
83
84 #define BNXT_TX_PUSH_THRESH 164
85
86 enum board_idx {
87 BCM57301,
88 BCM57302,
89 BCM57304,
90 BCM57417_NPAR,
91 BCM58700,
92 BCM57311,
93 BCM57312,
94 BCM57402,
95 BCM57404,
96 BCM57406,
97 BCM57402_NPAR,
98 BCM57407,
99 BCM57412,
100 BCM57414,
101 BCM57416,
102 BCM57417,
103 BCM57412_NPAR,
104 BCM57314,
105 BCM57417_SFP,
106 BCM57416_SFP,
107 BCM57404_NPAR,
108 BCM57406_NPAR,
109 BCM57407_SFP,
110 BCM57407_NPAR,
111 BCM57414_NPAR,
112 BCM57416_NPAR,
113 BCM57452,
114 BCM57454,
115 BCM5745x_NPAR,
116 BCM57508,
117 BCM57504,
118 BCM57502,
119 BCM57508_NPAR,
120 BCM57504_NPAR,
121 BCM57502_NPAR,
122 BCM58802,
123 BCM58804,
124 BCM58808,
125 NETXTREME_E_VF,
126 NETXTREME_C_VF,
127 NETXTREME_S_VF,
128 NETXTREME_C_VF_HV,
129 NETXTREME_E_VF_HV,
130 NETXTREME_E_P5_VF,
131 NETXTREME_E_P5_VF_HV,
132 };
133
134 /* indexed by enum above */
135 static const struct {
136 char *name;
137 } board_info[] = {
138 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
139 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
140 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
141 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
142 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
143 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
144 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
145 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
146 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
147 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
148 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
149 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
150 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
151 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
152 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
153 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
154 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
155 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
156 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
157 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
158 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
159 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
160 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
161 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
162 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
163 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
164 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
165 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
166 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
167 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
168 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
169 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
170 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
171 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
172 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
173 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
174 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
175 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
176 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
177 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
178 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
179 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
180 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
181 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
182 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
183 };
184
185 static const struct pci_device_id bnxt_pci_tbl[] = {
186 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
187 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
188 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
189 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
190 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
191 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
192 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
193 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
194 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
195 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
196 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
197 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
198 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
199 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
200 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
201 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
202 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
203 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
204 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
205 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
206 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
207 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
208 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
209 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
210 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
211 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
212 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
213 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
214 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
215 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
216 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
217 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
218 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
219 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
220 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
221 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
222 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
223 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
224 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
225 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
226 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
227 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
228 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
229 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
230 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
231 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
232 #ifdef CONFIG_BNXT_SRIOV
233 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
234 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
235 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
236 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
237 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
238 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
239 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
240 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
241 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
242 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
243 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
244 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
245 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
246 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
247 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
248 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
249 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
250 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
251 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
252 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
253 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
254 #endif
255 { 0 }
256 };
257
258 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
259
260 static const u16 bnxt_vf_req_snif[] = {
261 HWRM_FUNC_CFG,
262 HWRM_FUNC_VF_CFG,
263 HWRM_PORT_PHY_QCFG,
264 HWRM_CFA_L2_FILTER_ALLOC,
265 };
266
267 static const u16 bnxt_async_events_arr[] = {
268 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
269 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
270 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
271 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
272 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
273 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
274 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
275 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
276 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
277 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
278 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
279 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
280 };
281
282 static struct workqueue_struct *bnxt_pf_wq;
283
284 static bool bnxt_vf_pciid(enum board_idx idx)
285 {
286 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
287 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
288 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
289 idx == NETXTREME_E_P5_VF_HV);
290 }
291
292 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
293 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
294 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
295
296 #define BNXT_CP_DB_IRQ_DIS(db) \
297 writel(DB_CP_IRQ_DIS_FLAGS, db)
298
299 #define BNXT_DB_CQ(db, idx) \
300 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
301
302 #define BNXT_DB_NQ_P5(db, idx) \
303 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
304
305 #define BNXT_DB_CQ_ARM(db, idx) \
306 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
307
308 #define BNXT_DB_NQ_ARM_P5(db, idx) \
309 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
310
311 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
312 {
313 if (bp->flags & BNXT_FLAG_CHIP_P5)
314 BNXT_DB_NQ_P5(db, idx);
315 else
316 BNXT_DB_CQ(db, idx);
317 }
318
319 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
320 {
321 if (bp->flags & BNXT_FLAG_CHIP_P5)
322 BNXT_DB_NQ_ARM_P5(db, idx);
323 else
324 BNXT_DB_CQ_ARM(db, idx);
325 }
326
327 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
328 {
329 if (bp->flags & BNXT_FLAG_CHIP_P5)
330 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
331 db->doorbell);
332 else
333 BNXT_DB_CQ(db, idx);
334 }
335
336 const u16 bnxt_lhint_arr[] = {
337 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
338 TX_BD_FLAGS_LHINT_512_TO_1023,
339 TX_BD_FLAGS_LHINT_1024_TO_2047,
340 TX_BD_FLAGS_LHINT_1024_TO_2047,
341 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
342 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
343 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
344 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
345 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
346 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
347 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
348 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
349 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
350 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
351 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
352 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
353 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
354 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
355 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
356 };
357
358 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
359 {
360 struct metadata_dst *md_dst = skb_metadata_dst(skb);
361
362 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
363 return 0;
364
365 return md_dst->u.port_info.port_id;
366 }
367
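/* Main transmit routine.  When the TX ring is empty, the packet fits
 * within bp->tx_push_thresh and no lflags are needed, the data is pushed
 * inline through the doorbell; otherwise it is DMA-mapped and posted as
 * long TX BDs with optional TSO, checksum, VLAN and PTP timestamp flags.
 */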
368 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
369 {
370 struct bnxt *bp = netdev_priv(dev);
371 struct tx_bd *txbd;
372 struct tx_bd_ext *txbd1;
373 struct netdev_queue *txq;
374 int i;
375 dma_addr_t mapping;
376 unsigned int length, pad = 0;
377 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
378 u16 prod, last_frag;
379 struct pci_dev *pdev = bp->pdev;
380 struct bnxt_tx_ring_info *txr;
381 struct bnxt_sw_tx_bd *tx_buf;
382 __le32 lflags = 0;
383
384 i = skb_get_queue_mapping(skb);
385 if (unlikely(i >= bp->tx_nr_rings)) {
386 dev_kfree_skb_any(skb);
387 return NETDEV_TX_OK;
388 }
389
390 txq = netdev_get_tx_queue(dev, i);
391 txr = &bp->tx_ring[bp->tx_ring_map[i]];
392 prod = txr->tx_prod;
393
394 free_size = bnxt_tx_avail(bp, txr);
395 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
396 netif_tx_stop_queue(txq);
397 return NETDEV_TX_BUSY;
398 }
399
400 length = skb->len;
401 len = skb_headlen(skb);
402 last_frag = skb_shinfo(skb)->nr_frags;
403
404 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
405
406 txbd->tx_bd_opaque = prod;
407
408 tx_buf = &txr->tx_buf_ring[prod];
409 tx_buf->skb = skb;
410 tx_buf->nr_frags = last_frag;
411
412 vlan_tag_flags = 0;
413 cfa_action = bnxt_xmit_get_cfa_action(skb);
414 if (skb_vlan_tag_present(skb)) {
415 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
416 skb_vlan_tag_get(skb);
 417         /* Currently supports 802.1Q and 802.1ad VLAN offloads;
 418          * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
 419          */
420 if (skb->vlan_proto == htons(ETH_P_8021Q))
421 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
422 }
423
424 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
425 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
426
427 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
428 atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
429 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid)) {
430 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
431 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
432 } else {
433 atomic_inc(&bp->ptp_cfg->tx_avail);
434 }
435 }
436 }
437
438 if (unlikely(skb->no_fcs))
439 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
440
441 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
442 !lflags) {
443 struct tx_push_buffer *tx_push_buf = txr->tx_push;
444 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
445 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
446 void __iomem *db = txr->tx_db.doorbell;
447 void *pdata = tx_push_buf->data;
448 u64 *end;
449 int j, push_len;
450
451 /* Set COAL_NOW to be ready quickly for the next push */
452 tx_push->tx_bd_len_flags_type =
453 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
454 TX_BD_TYPE_LONG_TX_BD |
455 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
456 TX_BD_FLAGS_COAL_NOW |
457 TX_BD_FLAGS_PACKET_END |
458 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
459
460 if (skb->ip_summed == CHECKSUM_PARTIAL)
461 tx_push1->tx_bd_hsize_lflags =
462 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
463 else
464 tx_push1->tx_bd_hsize_lflags = 0;
465
466 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
467 tx_push1->tx_bd_cfa_action =
468 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
469
470 end = pdata + length;
471 end = PTR_ALIGN(end, 8) - 1;
472 *end = 0;
473
474 skb_copy_from_linear_data(skb, pdata, len);
475 pdata += len;
476 for (j = 0; j < last_frag; j++) {
477 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
478 void *fptr;
479
480 fptr = skb_frag_address_safe(frag);
481 if (!fptr)
482 goto normal_tx;
483
484 memcpy(pdata, fptr, skb_frag_size(frag));
485 pdata += skb_frag_size(frag);
486 }
487
488 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
489 txbd->tx_bd_haddr = txr->data_mapping;
490 prod = NEXT_TX(prod);
491 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
492 memcpy(txbd, tx_push1, sizeof(*txbd));
493 prod = NEXT_TX(prod);
494 tx_push->doorbell =
495 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
496 txr->tx_prod = prod;
497
498 tx_buf->is_push = 1;
499 netdev_tx_sent_queue(txq, skb->len);
500 wmb(); /* Sync is_push and byte queue before pushing data */
501
502 push_len = (length + sizeof(*tx_push) + 7) / 8;
503 if (push_len > 16) {
504 __iowrite64_copy(db, tx_push_buf, 16);
505 __iowrite32_copy(db + 4, tx_push_buf + 1,
506 (push_len - 16) << 1);
507 } else {
508 __iowrite64_copy(db, tx_push_buf, push_len);
509 }
510
511 goto tx_done;
512 }
513
514 normal_tx:
515 if (length < BNXT_MIN_PKT_SIZE) {
516 pad = BNXT_MIN_PKT_SIZE - length;
517 if (skb_pad(skb, pad)) {
518 /* SKB already freed. */
519 tx_buf->skb = NULL;
520 return NETDEV_TX_OK;
521 }
522 length = BNXT_MIN_PKT_SIZE;
523 }
524
525 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
526
527 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
528 dev_kfree_skb_any(skb);
529 tx_buf->skb = NULL;
530 return NETDEV_TX_OK;
531 }
532
533 dma_unmap_addr_set(tx_buf, mapping, mapping);
534 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
535 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
536
537 txbd->tx_bd_haddr = cpu_to_le64(mapping);
538
539 prod = NEXT_TX(prod);
540 txbd1 = (struct tx_bd_ext *)
541 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
542
543 txbd1->tx_bd_hsize_lflags = lflags;
544 if (skb_is_gso(skb)) {
545 u32 hdr_len;
546
547 if (skb->encapsulation)
548 hdr_len = skb_inner_network_offset(skb) +
549 skb_inner_network_header_len(skb) +
550 inner_tcp_hdrlen(skb);
551 else
552 hdr_len = skb_transport_offset(skb) +
553 tcp_hdrlen(skb);
554
555 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
556 TX_BD_FLAGS_T_IPID |
557 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
558 length = skb_shinfo(skb)->gso_size;
559 txbd1->tx_bd_mss = cpu_to_le32(length);
560 length += hdr_len;
561 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
562 txbd1->tx_bd_hsize_lflags |=
563 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
564 txbd1->tx_bd_mss = 0;
565 }
566
567 length >>= 9;
568 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
569 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
570 skb->len);
571 i = 0;
572 goto tx_dma_error;
573 }
574 flags |= bnxt_lhint_arr[length];
575 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
576
577 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
578 txbd1->tx_bd_cfa_action =
579 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
580 for (i = 0; i < last_frag; i++) {
581 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
582
583 prod = NEXT_TX(prod);
584 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
585
586 len = skb_frag_size(frag);
587 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
588 DMA_TO_DEVICE);
589
590 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
591 goto tx_dma_error;
592
593 tx_buf = &txr->tx_buf_ring[prod];
594 dma_unmap_addr_set(tx_buf, mapping, mapping);
595
596 txbd->tx_bd_haddr = cpu_to_le64(mapping);
597
598 flags = len << TX_BD_LEN_SHIFT;
599 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
600 }
601
602 flags &= ~TX_BD_LEN;
603 txbd->tx_bd_len_flags_type =
604 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
605 TX_BD_FLAGS_PACKET_END);
606
607 netdev_tx_sent_queue(txq, skb->len);
608
609 skb_tx_timestamp(skb);
610
611 /* Sync BD data before updating doorbell */
612 wmb();
613
614 prod = NEXT_TX(prod);
615 txr->tx_prod = prod;
616
617 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
618 bnxt_db_write(bp, &txr->tx_db, prod);
619
620 tx_done:
621
622 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
623 if (netdev_xmit_more() && !tx_buf->is_push)
624 bnxt_db_write(bp, &txr->tx_db, prod);
625
626 netif_tx_stop_queue(txq);
627
628 /* netif_tx_stop_queue() must be done before checking
629 * tx index in bnxt_tx_avail() below, because in
630 * bnxt_tx_int(), we update tx index before checking for
631 * netif_tx_queue_stopped().
632 */
633 smp_mb();
634 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
635 netif_tx_wake_queue(txq);
636 }
637 return NETDEV_TX_OK;
638
639 tx_dma_error:
640 if (BNXT_TX_PTP_IS_SET(lflags))
641 atomic_inc(&bp->ptp_cfg->tx_avail);
642
643 last_frag = i;
644
645 /* start back at beginning and unmap skb */
646 prod = txr->tx_prod;
647 tx_buf = &txr->tx_buf_ring[prod];
648 tx_buf->skb = NULL;
649 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
650 skb_headlen(skb), PCI_DMA_TODEVICE);
651 prod = NEXT_TX(prod);
652
653 /* unmap remaining mapped pages */
654 for (i = 0; i < last_frag; i++) {
655 prod = NEXT_TX(prod);
656 tx_buf = &txr->tx_buf_ring[prod];
657 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
658 skb_frag_size(&skb_shinfo(skb)->frags[i]),
659 PCI_DMA_TODEVICE);
660 }
661
662 dev_kfree_skb_any(skb);
663 return NETDEV_TX_OK;
664 }
665
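/* Reclaim nr_pkts completed TX packets: unmap the head and fragment
 * buffers, defer freeing skbs that are still waiting for a PTP TX
 * timestamp on P5 chips, update BQL accounting and wake the queue once
 * enough descriptors are free again.
 */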
666 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
667 {
668 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
669 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
670 u16 cons = txr->tx_cons;
671 struct pci_dev *pdev = bp->pdev;
672 int i;
673 unsigned int tx_bytes = 0;
674
675 for (i = 0; i < nr_pkts; i++) {
676 struct bnxt_sw_tx_bd *tx_buf;
677 bool compl_deferred = false;
678 struct sk_buff *skb;
679 int j, last;
680
681 tx_buf = &txr->tx_buf_ring[cons];
682 cons = NEXT_TX(cons);
683 skb = tx_buf->skb;
684 tx_buf->skb = NULL;
685
686 if (tx_buf->is_push) {
687 tx_buf->is_push = 0;
688 goto next_tx_int;
689 }
690
691 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
692 skb_headlen(skb), PCI_DMA_TODEVICE);
693 last = tx_buf->nr_frags;
694
695 for (j = 0; j < last; j++) {
696 cons = NEXT_TX(cons);
697 tx_buf = &txr->tx_buf_ring[cons];
698 dma_unmap_page(
699 &pdev->dev,
700 dma_unmap_addr(tx_buf, mapping),
701 skb_frag_size(&skb_shinfo(skb)->frags[j]),
702 PCI_DMA_TODEVICE);
703 }
704 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
705 if (bp->flags & BNXT_FLAG_CHIP_P5) {
706 if (!bnxt_get_tx_ts_p5(bp, skb))
707 compl_deferred = true;
708 else
709 atomic_inc(&bp->ptp_cfg->tx_avail);
710 }
711 }
712
713 next_tx_int:
714 cons = NEXT_TX(cons);
715
716 tx_bytes += skb->len;
717 if (!compl_deferred)
718 dev_kfree_skb_any(skb);
719 }
720
721 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
722 txr->tx_cons = cons;
723
724 /* Need to make the tx_cons update visible to bnxt_start_xmit()
725 * before checking for netif_tx_queue_stopped(). Without the
726 * memory barrier, there is a small possibility that bnxt_start_xmit()
727 * will miss it and cause the queue to be stopped forever.
728 */
729 smp_mb();
730
731 if (unlikely(netif_tx_queue_stopped(txq)) &&
732 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
733 __netif_tx_lock(txq, smp_processor_id());
734 if (netif_tx_queue_stopped(txq) &&
735 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
736 txr->dev_state != BNXT_DEV_STATE_CLOSING)
737 netif_tx_wake_queue(txq);
738 __netif_tx_unlock(txq);
739 }
740 }
741
742 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
743 struct bnxt_rx_ring_info *rxr,
744 gfp_t gfp)
745 {
746 struct device *dev = &bp->pdev->dev;
747 struct page *page;
748
749 page = page_pool_dev_alloc_pages(rxr->page_pool);
750 if (!page)
751 return NULL;
752
753 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
754 DMA_ATTR_WEAK_ORDERING);
755 if (dma_mapping_error(dev, *mapping)) {
756 page_pool_recycle_direct(rxr->page_pool, page);
757 return NULL;
758 }
759 *mapping += bp->rx_dma_offset;
760 return page;
761 }
762
763 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
764 gfp_t gfp)
765 {
766 u8 *data;
767 struct pci_dev *pdev = bp->pdev;
768
769 data = kmalloc(bp->rx_buf_size, gfp);
770 if (!data)
771 return NULL;
772
773 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
774 bp->rx_buf_use_size, bp->rx_dir,
775 DMA_ATTR_WEAK_ORDERING);
776
777 if (dma_mapping_error(&pdev->dev, *mapping)) {
778 kfree(data);
779 data = NULL;
780 }
781 return data;
782 }
783
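/* Allocate and DMA-map a fresh receive buffer (a page-pool page in page
 * mode, otherwise a kmalloc'd data buffer) and point the RX BD at it.
 */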
784 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
785 u16 prod, gfp_t gfp)
786 {
787 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
788 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
789 dma_addr_t mapping;
790
791 if (BNXT_RX_PAGE_MODE(bp)) {
792 struct page *page =
793 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
794
795 if (!page)
796 return -ENOMEM;
797
798 rx_buf->data = page;
799 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
800 } else {
801 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
802
803 if (!data)
804 return -ENOMEM;
805
806 rx_buf->data = data;
807 rx_buf->data_ptr = data + bp->rx_offset;
808 }
809 rx_buf->mapping = mapping;
810
811 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
812 return 0;
813 }
814
815 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
816 {
817 u16 prod = rxr->rx_prod;
818 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
819 struct rx_bd *cons_bd, *prod_bd;
820
821 prod_rx_buf = &rxr->rx_buf_ring[prod];
822 cons_rx_buf = &rxr->rx_buf_ring[cons];
823
824 prod_rx_buf->data = data;
825 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
826
827 prod_rx_buf->mapping = cons_rx_buf->mapping;
828
829 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
830 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
831
832 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
833 }
834
835 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
836 {
837 u16 next, max = rxr->rx_agg_bmap_size;
838
839 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
840 if (next >= max)
841 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
842 return next;
843 }
844
845 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
846 struct bnxt_rx_ring_info *rxr,
847 u16 prod, gfp_t gfp)
848 {
849 struct rx_bd *rxbd =
850 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
851 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
852 struct pci_dev *pdev = bp->pdev;
853 struct page *page;
854 dma_addr_t mapping;
855 u16 sw_prod = rxr->rx_sw_agg_prod;
856 unsigned int offset = 0;
857
858 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
859 page = rxr->rx_page;
860 if (!page) {
861 page = alloc_page(gfp);
862 if (!page)
863 return -ENOMEM;
864 rxr->rx_page = page;
865 rxr->rx_page_offset = 0;
866 }
867 offset = rxr->rx_page_offset;
868 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
869 if (rxr->rx_page_offset == PAGE_SIZE)
870 rxr->rx_page = NULL;
871 else
872 get_page(page);
873 } else {
874 page = alloc_page(gfp);
875 if (!page)
876 return -ENOMEM;
877 }
878
879 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
880 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
881 DMA_ATTR_WEAK_ORDERING);
882 if (dma_mapping_error(&pdev->dev, mapping)) {
883 __free_page(page);
884 return -EIO;
885 }
886
887 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
888 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
889
890 __set_bit(sw_prod, rxr->rx_agg_bmap);
891 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
892 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
893
894 rx_agg_buf->page = page;
895 rx_agg_buf->offset = offset;
896 rx_agg_buf->mapping = mapping;
897 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
898 rxbd->rx_bd_opaque = sw_prod;
899 return 0;
900 }
901
902 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
903 struct bnxt_cp_ring_info *cpr,
904 u16 cp_cons, u16 curr)
905 {
906 struct rx_agg_cmp *agg;
907
908 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
909 agg = (struct rx_agg_cmp *)
910 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
911 return agg;
912 }
913
914 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
915 struct bnxt_rx_ring_info *rxr,
916 u16 agg_id, u16 curr)
917 {
918 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
919
920 return &tpa_info->agg_arr[curr];
921 }
922
923 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
924 u16 start, u32 agg_bufs, bool tpa)
925 {
926 struct bnxt_napi *bnapi = cpr->bnapi;
927 struct bnxt *bp = bnapi->bp;
928 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
929 u16 prod = rxr->rx_agg_prod;
930 u16 sw_prod = rxr->rx_sw_agg_prod;
931 bool p5_tpa = false;
932 u32 i;
933
934 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
935 p5_tpa = true;
936
937 for (i = 0; i < agg_bufs; i++) {
938 u16 cons;
939 struct rx_agg_cmp *agg;
940 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
941 struct rx_bd *prod_bd;
942 struct page *page;
943
944 if (p5_tpa)
945 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
946 else
947 agg = bnxt_get_agg(bp, cpr, idx, start + i);
948 cons = agg->rx_agg_cmp_opaque;
949 __clear_bit(cons, rxr->rx_agg_bmap);
950
951 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
952 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
953
954 __set_bit(sw_prod, rxr->rx_agg_bmap);
955 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
956 cons_rx_buf = &rxr->rx_agg_ring[cons];
957
958 /* It is possible for sw_prod to be equal to cons, so
959 * set cons_rx_buf->page to NULL first.
960 */
961 page = cons_rx_buf->page;
962 cons_rx_buf->page = NULL;
963 prod_rx_buf->page = page;
964 prod_rx_buf->offset = cons_rx_buf->offset;
965
966 prod_rx_buf->mapping = cons_rx_buf->mapping;
967
968 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
969
970 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
971 prod_bd->rx_bd_opaque = sw_prod;
972
973 prod = NEXT_RX_AGG(prod);
974 sw_prod = NEXT_RX_AGG(sw_prod);
975 }
976 rxr->rx_agg_prod = prod;
977 rxr->rx_sw_agg_prod = sw_prod;
978 }
979
980 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
981 struct bnxt_rx_ring_info *rxr,
982 u16 cons, void *data, u8 *data_ptr,
983 dma_addr_t dma_addr,
984 unsigned int offset_and_len)
985 {
986 unsigned int payload = offset_and_len >> 16;
987 unsigned int len = offset_and_len & 0xffff;
988 skb_frag_t *frag;
989 struct page *page = data;
990 u16 prod = rxr->rx_prod;
991 struct sk_buff *skb;
992 int off, err;
993
994 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
995 if (unlikely(err)) {
996 bnxt_reuse_rx_data(rxr, cons, data);
997 return NULL;
998 }
999 dma_addr -= bp->rx_dma_offset;
1000 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
1001 DMA_ATTR_WEAK_ORDERING);
1002 page_pool_release_page(rxr->page_pool, page);
1003
1004 if (unlikely(!payload))
1005 payload = eth_get_headlen(bp->dev, data_ptr, len);
1006
1007 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1008 if (!skb) {
1009 __free_page(page);
1010 return NULL;
1011 }
1012
1013 off = (void *)data_ptr - page_address(page);
1014 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
1015 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1016 payload + NET_IP_ALIGN);
1017
1018 frag = &skb_shinfo(skb)->frags[0];
1019 skb_frag_size_sub(frag, payload);
1020 skb_frag_off_add(frag, payload);
1021 skb->data_len -= payload;
1022 skb->tail += payload;
1023
1024 return skb;
1025 }
1026
1027 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1028 struct bnxt_rx_ring_info *rxr, u16 cons,
1029 void *data, u8 *data_ptr,
1030 dma_addr_t dma_addr,
1031 unsigned int offset_and_len)
1032 {
1033 u16 prod = rxr->rx_prod;
1034 struct sk_buff *skb;
1035 int err;
1036
1037 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1038 if (unlikely(err)) {
1039 bnxt_reuse_rx_data(rxr, cons, data);
1040 return NULL;
1041 }
1042
1043 skb = build_skb(data, 0);
1044 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1045 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1046 if (!skb) {
1047 kfree(data);
1048 return NULL;
1049 }
1050
1051 skb_reserve(skb, bp->rx_offset);
1052 skb_put(skb, offset_and_len & 0xffff);
1053 return skb;
1054 }
1055
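/* Attach the aggregation ring pages referenced by this completion to the
 * skb as page fragments, replenishing each aggregation slot as it is
 * consumed.  Returns NULL (and recycles the remaining pages) on
 * allocation failure.
 */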
1056 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1057 struct bnxt_cp_ring_info *cpr,
1058 struct sk_buff *skb, u16 idx,
1059 u32 agg_bufs, bool tpa)
1060 {
1061 struct bnxt_napi *bnapi = cpr->bnapi;
1062 struct pci_dev *pdev = bp->pdev;
1063 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1064 u16 prod = rxr->rx_agg_prod;
1065 bool p5_tpa = false;
1066 u32 i;
1067
1068 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1069 p5_tpa = true;
1070
1071 for (i = 0; i < agg_bufs; i++) {
1072 u16 cons, frag_len;
1073 struct rx_agg_cmp *agg;
1074 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1075 struct page *page;
1076 dma_addr_t mapping;
1077
1078 if (p5_tpa)
1079 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1080 else
1081 agg = bnxt_get_agg(bp, cpr, idx, i);
1082 cons = agg->rx_agg_cmp_opaque;
1083 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1084 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1085
1086 cons_rx_buf = &rxr->rx_agg_ring[cons];
1087 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1088 cons_rx_buf->offset, frag_len);
1089 __clear_bit(cons, rxr->rx_agg_bmap);
1090
1091 /* It is possible for bnxt_alloc_rx_page() to allocate
1092 * a sw_prod index that equals the cons index, so we
1093 * need to clear the cons entry now.
1094 */
1095 mapping = cons_rx_buf->mapping;
1096 page = cons_rx_buf->page;
1097 cons_rx_buf->page = NULL;
1098
1099 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1100 struct skb_shared_info *shinfo;
1101 unsigned int nr_frags;
1102
1103 shinfo = skb_shinfo(skb);
1104 nr_frags = --shinfo->nr_frags;
1105 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1106
1107 dev_kfree_skb(skb);
1108
1109 cons_rx_buf->page = page;
1110
1111 /* Update prod since possibly some pages have been
1112 * allocated already.
1113 */
1114 rxr->rx_agg_prod = prod;
1115 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1116 return NULL;
1117 }
1118
1119 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1120 PCI_DMA_FROMDEVICE,
1121 DMA_ATTR_WEAK_ORDERING);
1122
1123 skb->data_len += frag_len;
1124 skb->len += frag_len;
1125 skb->truesize += PAGE_SIZE;
1126
1127 prod = NEXT_RX_AGG(prod);
1128 }
1129 rxr->rx_agg_prod = prod;
1130 return skb;
1131 }
1132
1133 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1134 u8 agg_bufs, u32 *raw_cons)
1135 {
1136 u16 last;
1137 struct rx_agg_cmp *agg;
1138
1139 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1140 last = RING_CMP(*raw_cons);
1141 agg = (struct rx_agg_cmp *)
1142 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1143 return RX_AGG_CMP_VALID(agg, *raw_cons);
1144 }
1145
1146 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1147 unsigned int len,
1148 dma_addr_t mapping)
1149 {
1150 struct bnxt *bp = bnapi->bp;
1151 struct pci_dev *pdev = bp->pdev;
1152 struct sk_buff *skb;
1153
1154 skb = napi_alloc_skb(&bnapi->napi, len);
1155 if (!skb)
1156 return NULL;
1157
1158 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1159 bp->rx_dir);
1160
1161 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1162 len + NET_IP_ALIGN);
1163
1164 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1165 bp->rx_dir);
1166
1167 skb_put(skb, len);
1168 return skb;
1169 }
1170
1171 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1172 u32 *raw_cons, void *cmp)
1173 {
1174 struct rx_cmp *rxcmp = cmp;
1175 u32 tmp_raw_cons = *raw_cons;
1176 u8 cmp_type, agg_bufs = 0;
1177
1178 cmp_type = RX_CMP_TYPE(rxcmp);
1179
1180 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1181 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1182 RX_CMP_AGG_BUFS) >>
1183 RX_CMP_AGG_BUFS_SHIFT;
1184 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1185 struct rx_tpa_end_cmp *tpa_end = cmp;
1186
1187 if (bp->flags & BNXT_FLAG_CHIP_P5)
1188 return 0;
1189
1190 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1191 }
1192
1193 if (agg_bufs) {
1194 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1195 return -EBUSY;
1196 }
1197 *raw_cons = tmp_raw_cons;
1198 return 0;
1199 }
1200
1201 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1202 {
1203 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1204 return;
1205
1206 if (BNXT_PF(bp))
1207 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1208 else
1209 schedule_delayed_work(&bp->fw_reset_task, delay);
1210 }
1211
1212 static void bnxt_queue_sp_work(struct bnxt *bp)
1213 {
1214 if (BNXT_PF(bp))
1215 queue_work(bnxt_pf_wq, &bp->sp_task);
1216 else
1217 schedule_work(&bp->sp_task);
1218 }
1219
1220 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1221 {
1222 if (!rxr->bnapi->in_reset) {
1223 rxr->bnapi->in_reset = true;
1224 if (bp->flags & BNXT_FLAG_CHIP_P5)
1225 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1226 else
1227 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
1228 bnxt_queue_sp_work(bp);
1229 }
1230 rxr->rx_next_cons = 0xffff;
1231 }
1232
1233 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1234 {
1235 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1236 u16 idx = agg_id & MAX_TPA_P5_MASK;
1237
1238 if (test_bit(idx, map->agg_idx_bmap))
1239 idx = find_first_zero_bit(map->agg_idx_bmap,
1240 BNXT_AGG_IDX_BMAP_SIZE);
1241 __set_bit(idx, map->agg_idx_bmap);
1242 map->agg_id_tbl[agg_id] = idx;
1243 return idx;
1244 }
1245
1246 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1247 {
1248 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1249
1250 __clear_bit(idx, map->agg_idx_bmap);
1251 }
1252
1253 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1254 {
1255 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1256
1257 return map->agg_id_tbl[agg_id];
1258 }
1259
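/* Handle a TPA_START completion: swap the receive buffer into the
 * per-ring tpa_info slot and record its length, RSS hash, GSO type and
 * header offsets so that bnxt_tpa_end() can assemble and deliver the
 * coalesced packet later.
 */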
1260 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1261 struct rx_tpa_start_cmp *tpa_start,
1262 struct rx_tpa_start_cmp_ext *tpa_start1)
1263 {
1264 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1265 struct bnxt_tpa_info *tpa_info;
1266 u16 cons, prod, agg_id;
1267 struct rx_bd *prod_bd;
1268 dma_addr_t mapping;
1269
1270 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1271 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1272 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1273 } else {
1274 agg_id = TPA_START_AGG_ID(tpa_start);
1275 }
1276 cons = tpa_start->rx_tpa_start_cmp_opaque;
1277 prod = rxr->rx_prod;
1278 cons_rx_buf = &rxr->rx_buf_ring[cons];
1279 prod_rx_buf = &rxr->rx_buf_ring[prod];
1280 tpa_info = &rxr->rx_tpa[agg_id];
1281
1282 if (unlikely(cons != rxr->rx_next_cons ||
1283 TPA_START_ERROR(tpa_start))) {
1284 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1285 cons, rxr->rx_next_cons,
1286 TPA_START_ERROR_CODE(tpa_start1));
1287 bnxt_sched_reset(bp, rxr);
1288 return;
1289 }
1290 /* Store cfa_code in tpa_info to use in tpa_end
1291 * completion processing.
1292 */
1293 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1294 prod_rx_buf->data = tpa_info->data;
1295 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1296
1297 mapping = tpa_info->mapping;
1298 prod_rx_buf->mapping = mapping;
1299
1300 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1301
1302 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1303
1304 tpa_info->data = cons_rx_buf->data;
1305 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1306 cons_rx_buf->data = NULL;
1307 tpa_info->mapping = cons_rx_buf->mapping;
1308
1309 tpa_info->len =
1310 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1311 RX_TPA_START_CMP_LEN_SHIFT;
1312 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1313 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1314
1315 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1316 tpa_info->gso_type = SKB_GSO_TCPV4;
1317 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1318 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1319 tpa_info->gso_type = SKB_GSO_TCPV6;
1320 tpa_info->rss_hash =
1321 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1322 } else {
1323 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1324 tpa_info->gso_type = 0;
1325 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1326 }
1327 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1328 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1329 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1330 tpa_info->agg_count = 0;
1331
1332 rxr->rx_prod = NEXT_RX(prod);
1333 cons = NEXT_RX(cons);
1334 rxr->rx_next_cons = NEXT_RX(cons);
1335 cons_rx_buf = &rxr->rx_buf_ring[cons];
1336
1337 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1338 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1339 cons_rx_buf->data = NULL;
1340 }
1341
1342 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1343 {
1344 if (agg_bufs)
1345 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1346 }
1347
1348 #ifdef CONFIG_INET
1349 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1350 {
1351 struct udphdr *uh = NULL;
1352
1353 if (ip_proto == htons(ETH_P_IP)) {
1354 struct iphdr *iph = (struct iphdr *)skb->data;
1355
1356 if (iph->protocol == IPPROTO_UDP)
1357 uh = (struct udphdr *)(iph + 1);
1358 } else {
1359 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1360
1361 if (iph->nexthdr == IPPROTO_UDP)
1362 uh = (struct udphdr *)(iph + 1);
1363 }
1364 if (uh) {
1365 if (uh->check)
1366 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1367 else
1368 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1369 }
1370 }
1371 #endif
1372
1373 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1374 int payload_off, int tcp_ts,
1375 struct sk_buff *skb)
1376 {
1377 #ifdef CONFIG_INET
1378 struct tcphdr *th;
1379 int len, nw_off;
1380 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1381 u32 hdr_info = tpa_info->hdr_info;
1382 bool loopback = false;
1383
1384 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1385 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1386 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1387
1388 /* If the packet is an internal loopback packet, the offsets will
1389 * have an extra 4 bytes.
1390 */
1391 if (inner_mac_off == 4) {
1392 loopback = true;
1393 } else if (inner_mac_off > 4) {
1394 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1395 ETH_HLEN - 2));
1396
 1397                  * We only support inner IPv4/IPv6. If we don't see the
1398 * correct protocol ID, it must be a loopback packet where
1399 * the offsets are off by 4.
1400 */
1401 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1402 loopback = true;
1403 }
1404 if (loopback) {
 1405                 /* internal loopback packet, subtract 4 from all offsets */
1406 inner_ip_off -= 4;
1407 inner_mac_off -= 4;
1408 outer_ip_off -= 4;
1409 }
1410
1411 nw_off = inner_ip_off - ETH_HLEN;
1412 skb_set_network_header(skb, nw_off);
1413 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1414 struct ipv6hdr *iph = ipv6_hdr(skb);
1415
1416 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1417 len = skb->len - skb_transport_offset(skb);
1418 th = tcp_hdr(skb);
1419 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1420 } else {
1421 struct iphdr *iph = ip_hdr(skb);
1422
1423 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1424 len = skb->len - skb_transport_offset(skb);
1425 th = tcp_hdr(skb);
1426 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1427 }
1428
1429 if (inner_mac_off) { /* tunnel */
1430 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1431 ETH_HLEN - 2));
1432
1433 bnxt_gro_tunnel(skb, proto);
1434 }
1435 #endif
1436 return skb;
1437 }
1438
1439 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1440 int payload_off, int tcp_ts,
1441 struct sk_buff *skb)
1442 {
1443 #ifdef CONFIG_INET
1444 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1445 u32 hdr_info = tpa_info->hdr_info;
1446 int iphdr_len, nw_off;
1447
1448 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1449 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1450 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1451
1452 nw_off = inner_ip_off - ETH_HLEN;
1453 skb_set_network_header(skb, nw_off);
1454 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1455 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1456 skb_set_transport_header(skb, nw_off + iphdr_len);
1457
1458 if (inner_mac_off) { /* tunnel */
1459 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1460 ETH_HLEN - 2));
1461
1462 bnxt_gro_tunnel(skb, proto);
1463 }
1464 #endif
1465 return skb;
1466 }
1467
1468 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1469 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1470
1471 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1472 int payload_off, int tcp_ts,
1473 struct sk_buff *skb)
1474 {
1475 #ifdef CONFIG_INET
1476 struct tcphdr *th;
1477 int len, nw_off, tcp_opt_len = 0;
1478
1479 if (tcp_ts)
1480 tcp_opt_len = 12;
1481
1482 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1483 struct iphdr *iph;
1484
1485 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1486 ETH_HLEN;
1487 skb_set_network_header(skb, nw_off);
1488 iph = ip_hdr(skb);
1489 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1490 len = skb->len - skb_transport_offset(skb);
1491 th = tcp_hdr(skb);
1492 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1493 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1494 struct ipv6hdr *iph;
1495
1496 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1497 ETH_HLEN;
1498 skb_set_network_header(skb, nw_off);
1499 iph = ipv6_hdr(skb);
1500 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1501 len = skb->len - skb_transport_offset(skb);
1502 th = tcp_hdr(skb);
1503 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1504 } else {
1505 dev_kfree_skb_any(skb);
1506 return NULL;
1507 }
1508
1509 if (nw_off) /* tunnel */
1510 bnxt_gro_tunnel(skb, skb->protocol);
1511 #endif
1512 return skb;
1513 }
1514
1515 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1516 struct bnxt_tpa_info *tpa_info,
1517 struct rx_tpa_end_cmp *tpa_end,
1518 struct rx_tpa_end_cmp_ext *tpa_end1,
1519 struct sk_buff *skb)
1520 {
1521 #ifdef CONFIG_INET
1522 int payload_off;
1523 u16 segs;
1524
1525 segs = TPA_END_TPA_SEGS(tpa_end);
1526 if (segs == 1)
1527 return skb;
1528
1529 NAPI_GRO_CB(skb)->count = segs;
1530 skb_shinfo(skb)->gso_size =
1531 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1532 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1533 if (bp->flags & BNXT_FLAG_CHIP_P5)
1534 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1535 else
1536 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1537 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1538 if (likely(skb))
1539 tcp_gro_complete(skb);
1540 #endif
1541 return skb;
1542 }
1543
 1544 /* Given the cfa_code of a received packet, determine which
1545 * netdev (vf-rep or PF) the packet is destined to.
1546 */
1547 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1548 {
1549 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1550
 1551         /* if vf-rep dev is NULL, the packet must belong to the PF */
1552 return dev ? dev : bp->dev;
1553 }
1554
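/* Handle a TPA_END completion: build (or copy, for small packets) the
 * skb for the coalesced packet, attach any aggregation pages, apply
 * VLAN, RSS hash and checksum metadata, and run GRO completion when
 * enabled.
 */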
1555 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1556 struct bnxt_cp_ring_info *cpr,
1557 u32 *raw_cons,
1558 struct rx_tpa_end_cmp *tpa_end,
1559 struct rx_tpa_end_cmp_ext *tpa_end1,
1560 u8 *event)
1561 {
1562 struct bnxt_napi *bnapi = cpr->bnapi;
1563 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1564 u8 *data_ptr, agg_bufs;
1565 unsigned int len;
1566 struct bnxt_tpa_info *tpa_info;
1567 dma_addr_t mapping;
1568 struct sk_buff *skb;
1569 u16 idx = 0, agg_id;
1570 void *data;
1571 bool gro;
1572
1573 if (unlikely(bnapi->in_reset)) {
1574 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1575
1576 if (rc < 0)
1577 return ERR_PTR(-EBUSY);
1578 return NULL;
1579 }
1580
1581 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1582 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1583 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1584 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1585 tpa_info = &rxr->rx_tpa[agg_id];
1586 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1587 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1588 agg_bufs, tpa_info->agg_count);
1589 agg_bufs = tpa_info->agg_count;
1590 }
1591 tpa_info->agg_count = 0;
1592 *event |= BNXT_AGG_EVENT;
1593 bnxt_free_agg_idx(rxr, agg_id);
1594 idx = agg_id;
1595 gro = !!(bp->flags & BNXT_FLAG_GRO);
1596 } else {
1597 agg_id = TPA_END_AGG_ID(tpa_end);
1598 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1599 tpa_info = &rxr->rx_tpa[agg_id];
1600 idx = RING_CMP(*raw_cons);
1601 if (agg_bufs) {
1602 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1603 return ERR_PTR(-EBUSY);
1604
1605 *event |= BNXT_AGG_EVENT;
1606 idx = NEXT_CMP(idx);
1607 }
1608 gro = !!TPA_END_GRO(tpa_end);
1609 }
1610 data = tpa_info->data;
1611 data_ptr = tpa_info->data_ptr;
1612 prefetch(data_ptr);
1613 len = tpa_info->len;
1614 mapping = tpa_info->mapping;
1615
1616 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1617 bnxt_abort_tpa(cpr, idx, agg_bufs);
1618 if (agg_bufs > MAX_SKB_FRAGS)
1619 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1620 agg_bufs, (int)MAX_SKB_FRAGS);
1621 return NULL;
1622 }
1623
1624 if (len <= bp->rx_copy_thresh) {
1625 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1626 if (!skb) {
1627 bnxt_abort_tpa(cpr, idx, agg_bufs);
1628 return NULL;
1629 }
1630 } else {
1631 u8 *new_data;
1632 dma_addr_t new_mapping;
1633
1634 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1635 if (!new_data) {
1636 bnxt_abort_tpa(cpr, idx, agg_bufs);
1637 return NULL;
1638 }
1639
1640 tpa_info->data = new_data;
1641 tpa_info->data_ptr = new_data + bp->rx_offset;
1642 tpa_info->mapping = new_mapping;
1643
1644 skb = build_skb(data, 0);
1645 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1646 bp->rx_buf_use_size, bp->rx_dir,
1647 DMA_ATTR_WEAK_ORDERING);
1648
1649 if (!skb) {
1650 kfree(data);
1651 bnxt_abort_tpa(cpr, idx, agg_bufs);
1652 return NULL;
1653 }
1654 skb_reserve(skb, bp->rx_offset);
1655 skb_put(skb, len);
1656 }
1657
1658 if (agg_bufs) {
1659 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1660 if (!skb) {
1661 /* Page reuse already handled by bnxt_rx_pages(). */
1662 return NULL;
1663 }
1664 }
1665
1666 skb->protocol =
1667 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1668
1669 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1670 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1671
1672 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1673 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1674 __be16 vlan_proto = htons(tpa_info->metadata >>
1675 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1676 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1677
1678 if (eth_type_vlan(vlan_proto)) {
1679 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1680 } else {
1681 dev_kfree_skb(skb);
1682 return NULL;
1683 }
1684 }
1685
1686 skb_checksum_none_assert(skb);
1687 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1688 skb->ip_summed = CHECKSUM_UNNECESSARY;
1689 skb->csum_level =
1690 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1691 }
1692
1693 if (gro)
1694 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1695
1696 return skb;
1697 }
1698
1699 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1700 struct rx_agg_cmp *rx_agg)
1701 {
1702 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1703 struct bnxt_tpa_info *tpa_info;
1704
1705 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1706 tpa_info = &rxr->rx_tpa[agg_id];
1707 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1708 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1709 }
1710
1711 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1712 struct sk_buff *skb)
1713 {
1714 if (skb->dev != bp->dev) {
1715 /* this packet belongs to a vf-rep */
1716 bnxt_vf_rep_rx(bp, skb);
1717 return;
1718 }
1719 skb_record_rx_queue(skb, bnapi->index);
1720 napi_gro_receive(&bnapi->napi, skb);
1721 }
1722
1723 /* returns the following:
1724 * 1 - 1 packet successfully received
1725 * 0 - successful TPA_START, packet not completed yet
1726 * -EBUSY - completion ring does not have all the agg buffers yet
1727 * -ENOMEM - packet aborted due to out of memory
1728 * -EIO - packet aborted due to hw error indicated in BD
1729 */
1730 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1731 u32 *raw_cons, u8 *event)
1732 {
1733 struct bnxt_napi *bnapi = cpr->bnapi;
1734 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1735 struct net_device *dev = bp->dev;
1736 struct rx_cmp *rxcmp;
1737 struct rx_cmp_ext *rxcmp1;
1738 u32 tmp_raw_cons = *raw_cons;
1739 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1740 struct bnxt_sw_rx_bd *rx_buf;
1741 unsigned int len;
1742 u8 *data_ptr, agg_bufs, cmp_type;
1743 dma_addr_t dma_addr;
1744 struct sk_buff *skb;
1745 u32 flags, misc;
1746 void *data;
1747 int rc = 0;
1748
1749 rxcmp = (struct rx_cmp *)
1750 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1751
1752 cmp_type = RX_CMP_TYPE(rxcmp);
1753
1754 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1755 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1756 goto next_rx_no_prod_no_len;
1757 }
1758
1759 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1760 cp_cons = RING_CMP(tmp_raw_cons);
1761 rxcmp1 = (struct rx_cmp_ext *)
1762 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1763
1764 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1765 return -EBUSY;
1766
1767 prod = rxr->rx_prod;
1768
1769 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1770 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1771 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1772
1773 *event |= BNXT_RX_EVENT;
1774 goto next_rx_no_prod_no_len;
1775
1776 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1777 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1778 (struct rx_tpa_end_cmp *)rxcmp,
1779 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1780
1781 if (IS_ERR(skb))
1782 return -EBUSY;
1783
1784 rc = -ENOMEM;
1785 if (likely(skb)) {
1786 bnxt_deliver_skb(bp, bnapi, skb);
1787 rc = 1;
1788 }
1789 *event |= BNXT_RX_EVENT;
1790 goto next_rx_no_prod_no_len;
1791 }
1792
1793 cons = rxcmp->rx_cmp_opaque;
1794 if (unlikely(cons != rxr->rx_next_cons)) {
1795 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1796
1797 /* 0xffff is forced error, don't print it */
1798 if (rxr->rx_next_cons != 0xffff)
1799 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1800 cons, rxr->rx_next_cons);
1801 bnxt_sched_reset(bp, rxr);
1802 if (rc1)
1803 return rc1;
1804 goto next_rx_no_prod_no_len;
1805 }
1806 rx_buf = &rxr->rx_buf_ring[cons];
1807 data = rx_buf->data;
1808 data_ptr = rx_buf->data_ptr;
1809 prefetch(data_ptr);
1810
1811 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1812 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1813
1814 if (agg_bufs) {
1815 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1816 return -EBUSY;
1817
1818 cp_cons = NEXT_CMP(cp_cons);
1819 *event |= BNXT_AGG_EVENT;
1820 }
1821 *event |= BNXT_RX_EVENT;
1822
1823 rx_buf->data = NULL;
1824 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1825 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1826
1827 bnxt_reuse_rx_data(rxr, cons, data);
1828 if (agg_bufs)
1829 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1830 false);
1831
1832 rc = -EIO;
1833 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1834 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1835 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1836 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1837 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1838 rx_err);
1839 bnxt_sched_reset(bp, rxr);
1840 }
1841 }
1842 goto next_rx_no_len;
1843 }
1844
1845 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1846 len = flags >> RX_CMP_LEN_SHIFT;
1847 dma_addr = rx_buf->mapping;
1848
1849 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1850 rc = 1;
1851 goto next_rx;
1852 }
1853
1854 if (len <= bp->rx_copy_thresh) {
1855 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1856 bnxt_reuse_rx_data(rxr, cons, data);
1857 if (!skb) {
1858 if (agg_bufs)
1859 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1860 agg_bufs, false);
1861 rc = -ENOMEM;
1862 goto next_rx;
1863 }
1864 } else {
1865 u32 payload;
1866
1867 if (rx_buf->data_ptr == data_ptr)
1868 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1869 else
1870 payload = 0;
1871 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1872 payload | len);
1873 if (!skb) {
1874 rc = -ENOMEM;
1875 goto next_rx;
1876 }
1877 }
1878
1879 if (agg_bufs) {
1880 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1881 if (!skb) {
1882 rc = -ENOMEM;
1883 goto next_rx;
1884 }
1885 }
1886
1887 if (RX_CMP_HASH_VALID(rxcmp)) {
1888 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1889 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1890
1891 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1892 if (hash_type != 1 && hash_type != 3)
1893 type = PKT_HASH_TYPE_L3;
1894 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1895 }
1896
1897 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1898 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1899
1900 if ((rxcmp1->rx_cmp_flags2 &
1901 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1902 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1903 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1904 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1905 __be16 vlan_proto = htons(meta_data >>
1906 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1907
1908 if (eth_type_vlan(vlan_proto)) {
1909 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1910 } else {
1911 dev_kfree_skb(skb);
1912 goto next_rx;
1913 }
1914 }
1915
1916 skb_checksum_none_assert(skb);
1917 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1918 if (dev->features & NETIF_F_RXCSUM) {
1919 skb->ip_summed = CHECKSUM_UNNECESSARY;
1920 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1921 }
1922 } else {
1923 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1924 if (dev->features & NETIF_F_RXCSUM)
1925 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1926 }
1927 }
1928
1929 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1930 RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1931 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1932 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1933 u64 ns, ts;
1934
1935 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1936 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1937
1938 spin_lock_bh(&ptp->ptp_lock);
1939 ns = timecounter_cyc2time(&ptp->tc, ts);
1940 spin_unlock_bh(&ptp->ptp_lock);
1941 memset(skb_hwtstamps(skb), 0,
1942 sizeof(*skb_hwtstamps(skb)));
1943 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1944 }
1945 }
1946 }
1947 bnxt_deliver_skb(bp, bnapi, skb);
1948 rc = 1;
1949
1950 next_rx:
1951 cpr->rx_packets += 1;
1952 cpr->rx_bytes += len;
1953
1954 next_rx_no_len:
1955 rxr->rx_prod = NEXT_RX(prod);
1956 rxr->rx_next_cons = NEXT_RX(cons);
1957
1958 next_rx_no_prod_no_len:
1959 *raw_cons = tmp_raw_cons;
1960
1961 return rc;
1962 }
1963
1964 /* In netpoll mode, if we are using a combined completion ring, we need to
1965 * discard the rx packets and recycle the buffers.
1966 */
1967 static int bnxt_force_rx_discard(struct bnxt *bp,
1968 struct bnxt_cp_ring_info *cpr,
1969 u32 *raw_cons, u8 *event)
1970 {
1971 u32 tmp_raw_cons = *raw_cons;
1972 struct rx_cmp_ext *rxcmp1;
1973 struct rx_cmp *rxcmp;
1974 u16 cp_cons;
1975 u8 cmp_type;
1976
1977 cp_cons = RING_CMP(tmp_raw_cons);
1978 rxcmp = (struct rx_cmp *)
1979 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1980
1981 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1982 cp_cons = RING_CMP(tmp_raw_cons);
1983 rxcmp1 = (struct rx_cmp_ext *)
1984 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1985
1986 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1987 return -EBUSY;
1988
1989 cmp_type = RX_CMP_TYPE(rxcmp);
1990 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1991 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1992 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1993 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1994 struct rx_tpa_end_cmp_ext *tpa_end1;
1995
1996 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1997 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1998 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1999 }
2000 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
2001 }
2002
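/* Read one of the firmware health registers.  Depending on the register
 * type, the value is read from PCI config space, GRC space, or directly
 * from BAR0/BAR1.
 */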
2003 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2004 {
2005 struct bnxt_fw_health *fw_health = bp->fw_health;
2006 u32 reg = fw_health->regs[reg_idx];
2007 u32 reg_type, reg_off, val = 0;
2008
2009 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2010 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2011 switch (reg_type) {
2012 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2013 pci_read_config_dword(bp->pdev, reg_off, &val);
2014 break;
2015 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2016 reg_off = fw_health->mapped_regs[reg_idx];
2017 fallthrough;
2018 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2019 val = readl(bp->bar0 + reg_off);
2020 break;
2021 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2022 val = readl(bp->bar1 + reg_off);
2023 break;
2024 }
2025 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2026 val &= fw_health->fw_reset_inprog_reg_mask;
2027 return val;
2028 }
2029
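/* Translate a firmware aggregation ring ID to its ring group index, or
 * return INVALID_HW_RING_ID if no ring group owns that ring.
 */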
2030 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2031 {
2032 int i;
2033
2034 for (i = 0; i < bp->rx_nr_rings; i++) {
2035 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2036 struct bnxt_ring_grp_info *grp_info;
2037
2038 grp_info = &bp->grp_info[grp_idx];
2039 if (grp_info->agg_fw_ring_id == ring_id)
2040 return grp_idx;
2041 }
2042 return INVALID_HW_RING_ID;
2043 }
2044
2045 #define BNXT_GET_EVENT_PORT(data) \
2046 ((data) & \
2047 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2048
2049 #define BNXT_EVENT_RING_TYPE(data2) \
2050 ((data2) & \
2051 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2052
2053 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2054 (BNXT_EVENT_RING_TYPE(data2) == \
2055 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2056
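/* Process an async event completion from firmware and set the matching
 * sp_event bit(s) so that the slow-path workqueue can act on the event.
 */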
2057 static int bnxt_async_event_process(struct bnxt *bp,
2058 struct hwrm_async_event_cmpl *cmpl)
2059 {
2060 u16 event_id = le16_to_cpu(cmpl->event_id);
2061 u32 data1 = le32_to_cpu(cmpl->event_data1);
2062 u32 data2 = le32_to_cpu(cmpl->event_data2);
2063
2064 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2065 switch (event_id) {
2066 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2067 struct bnxt_link_info *link_info = &bp->link_info;
2068
2069 if (BNXT_VF(bp))
2070 goto async_event_process_exit;
2071
2072 /* print unsupported speed warning in forced speed mode only */
2073 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2074 (data1 & 0x20000)) {
2075 u16 fw_speed = link_info->force_link_speed;
2076 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2077
2078 if (speed != SPEED_UNKNOWN)
2079 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2080 speed);
2081 }
2082 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2083 }
2084 fallthrough;
2085 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2086 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2087 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2088 fallthrough;
2089 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2090 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2091 break;
2092 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2093 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2094 break;
2095 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2096 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2097
2098 if (BNXT_VF(bp))
2099 break;
2100
2101 if (bp->pf.port_id != port_id)
2102 break;
2103
2104 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2105 break;
2106 }
2107 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2108 if (BNXT_PF(bp))
2109 goto async_event_process_exit;
2110 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2111 break;
2112 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2113 char *fatal_str = "non-fatal";
2114
2115 if (!bp->fw_health)
2116 goto async_event_process_exit;
2117
2118 bp->fw_reset_timestamp = jiffies;
2119 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2120 if (!bp->fw_reset_min_dsecs)
2121 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2122 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2123 if (!bp->fw_reset_max_dsecs)
2124 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2125 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2126 fatal_str = "fatal";
2127 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2128 }
2129 netif_warn(bp, hw, bp->dev,
2130 "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2131 fatal_str, data1, data2,
2132 bp->fw_reset_min_dsecs * 100,
2133 bp->fw_reset_max_dsecs * 100);
2134 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2135 break;
2136 }
2137 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2138 struct bnxt_fw_health *fw_health = bp->fw_health;
2139
2140 if (!fw_health)
2141 goto async_event_process_exit;
2142
2143 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2144 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2145 if (!fw_health->enabled) {
2146 netif_info(bp, drv, bp->dev,
2147 "Error recovery info: error recovery[0]\n");
2148 break;
2149 }
2150 fw_health->tmr_multiplier =
2151 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2152 bp->current_interval * 10);
2153 fw_health->tmr_counter = fw_health->tmr_multiplier;
2154 fw_health->last_fw_heartbeat =
2155 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2156 fw_health->last_fw_reset_cnt =
2157 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2158 netif_info(bp, drv, bp->dev,
2159 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2160 fw_health->master, fw_health->last_fw_reset_cnt,
2161 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2162 goto async_event_process_exit;
2163 }
2164 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2165 netif_notice(bp, hw, bp->dev,
2166 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2167 data1, data2);
2168 goto async_event_process_exit;
2169 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2170 struct bnxt_rx_ring_info *rxr;
2171 u16 grp_idx;
2172
2173 if (bp->flags & BNXT_FLAG_CHIP_P5)
2174 goto async_event_process_exit;
2175
2176 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2177 BNXT_EVENT_RING_TYPE(data2), data1);
2178 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2179 goto async_event_process_exit;
2180
2181 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2182 if (grp_idx == INVALID_HW_RING_ID) {
2183 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2184 data1);
2185 goto async_event_process_exit;
2186 }
2187 rxr = bp->bnapi[grp_idx]->rx_ring;
2188 bnxt_sched_reset(bp, rxr);
2189 goto async_event_process_exit;
2190 }
2191 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2192 struct bnxt_fw_health *fw_health = bp->fw_health;
2193
2194 netif_notice(bp, hw, bp->dev,
2195 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2196 data1, data2);
2197 if (fw_health) {
2198 fw_health->echo_req_data1 = data1;
2199 fw_health->echo_req_data2 = data2;
2200 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2201 break;
2202 }
2203 goto async_event_process_exit;
2204 }
2205 default:
2206 goto async_event_process_exit;
2207 }
2208 bnxt_queue_sp_work(bp);
2209 async_event_process_exit:
2210 bnxt_ulp_async_events(bp, cmpl);
2211 return 0;
2212 }
2213
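/* Handle HWRM-related completions: command DONE notifications, forwarded
 * VF requests, and async events.
 */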
2214 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2215 {
2216 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2217 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2218 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2219 (struct hwrm_fwd_req_cmpl *)txcmp;
2220
2221 switch (cmpl_type) {
2222 case CMPL_BASE_TYPE_HWRM_DONE:
2223 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2224 if (seq_id == bp->hwrm_intr_seq_id)
2225 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2226 else
2227 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2228 break;
2229
2230 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2231 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2232
2233 if ((vf_id < bp->pf.first_vf_id) ||
2234 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2235 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2236 vf_id);
2237 return -EINVAL;
2238 }
2239
2240 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2241 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2242 bnxt_queue_sp_work(bp);
2243 break;
2244
2245 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2246 bnxt_async_event_process(bp,
2247 (struct hwrm_async_event_cmpl *)txcmp);
2248 break;
2249
2250 default:
2251 break;
2252 }
2253
2254 return 0;
2255 }
2256
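/* MSI-X interrupt handler; one vector is assigned per NAPI instance. */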
2257 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2258 {
2259 struct bnxt_napi *bnapi = dev_instance;
2260 struct bnxt *bp = bnapi->bp;
2261 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2262 u32 cons = RING_CMP(cpr->cp_raw_cons);
2263
2264 cpr->event_ctr++;
2265 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2266 napi_schedule(&bnapi->napi);
2267 return IRQ_HANDLED;
2268 }
2269
2270 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2271 {
2272 u32 raw_cons = cpr->cp_raw_cons;
2273 u16 cons = RING_CMP(raw_cons);
2274 struct tx_cmp *txcmp;
2275
2276 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2277
2278 return TX_CMP_VALID(txcmp, raw_cons);
2279 }
2280
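/* Legacy INTx interrupt handler used when MSI-X is not enabled. */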
2281 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2282 {
2283 struct bnxt_napi *bnapi = dev_instance;
2284 struct bnxt *bp = bnapi->bp;
2285 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2286 u32 cons = RING_CMP(cpr->cp_raw_cons);
2287 u32 int_status;
2288
2289 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2290
2291 if (!bnxt_has_work(bp, cpr)) {
2292 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2293 /* return if erroneous interrupt */
2294 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2295 return IRQ_NONE;
2296 }
2297
2298 /* disable ring IRQ */
2299 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2300
2301 /* Return here if interrupt is shared and is disabled. */
2302 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2303 return IRQ_HANDLED;
2304
2305 napi_schedule(&bnapi->napi);
2306 return IRQ_HANDLED;
2307 }
2308
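/* Process completions on one completion ring up to the NAPI budget.
 * TX completions are counted for later cleanup, RX completions are
 * handled by bnxt_rx_pkt(), and HWRM completions are dispatched to
 * bnxt_hwrm_handler().
 */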
2309 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2310 int budget)
2311 {
2312 struct bnxt_napi *bnapi = cpr->bnapi;
2313 u32 raw_cons = cpr->cp_raw_cons;
2314 u32 cons;
2315 int tx_pkts = 0;
2316 int rx_pkts = 0;
2317 u8 event = 0;
2318 struct tx_cmp *txcmp;
2319
2320 cpr->has_more_work = 0;
2321 cpr->had_work_done = 1;
2322 while (1) {
2323 int rc;
2324
2325 cons = RING_CMP(raw_cons);
2326 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2327
2328 if (!TX_CMP_VALID(txcmp, raw_cons))
2329 break;
2330
2331 /* The valid test of the entry must be done before
2332 * reading any further.
2333 */
2334 dma_rmb();
2335 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2336 tx_pkts++;
2337 /* return full budget so NAPI will complete. */
2338 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2339 rx_pkts = budget;
2340 raw_cons = NEXT_RAW_CMP(raw_cons);
2341 if (budget)
2342 cpr->has_more_work = 1;
2343 break;
2344 }
2345 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2346 if (likely(budget))
2347 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2348 else
2349 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2350 &event);
2351 if (likely(rc >= 0))
2352 rx_pkts += rc;
2353 /* Increment rx_pkts when rc is -ENOMEM to count towards
2354 * the NAPI budget. Otherwise, we may potentially loop
2355 * here forever if we consistently cannot allocate
2356 * buffers.
2357 */
2358 else if (rc == -ENOMEM && budget)
2359 rx_pkts++;
2360 else if (rc == -EBUSY) /* partial completion */
2361 break;
2362 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2363 CMPL_BASE_TYPE_HWRM_DONE) ||
2364 (TX_CMP_TYPE(txcmp) ==
2365 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2366 (TX_CMP_TYPE(txcmp) ==
2367 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2368 bnxt_hwrm_handler(bp, txcmp);
2369 }
2370 raw_cons = NEXT_RAW_CMP(raw_cons);
2371
2372 if (rx_pkts && rx_pkts == budget) {
2373 cpr->has_more_work = 1;
2374 break;
2375 }
2376 }
2377
2378 if (event & BNXT_REDIRECT_EVENT)
2379 xdp_do_flush_map();
2380
2381 if (event & BNXT_TX_EVENT) {
2382 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2383 u16 prod = txr->tx_prod;
2384
2385 /* Sync BD data before updating doorbell */
2386 wmb();
2387
2388 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2389 }
2390
2391 cpr->cp_raw_cons = raw_cons;
2392 bnapi->tx_pkts += tx_pkts;
2393 bnapi->events |= event;
2394 return rx_pkts;
2395 }
2396
2397 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2398 {
2399 if (bnapi->tx_pkts) {
2400 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2401 bnapi->tx_pkts = 0;
2402 }
2403
2404 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2405 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2406
2407 if (bnapi->events & BNXT_AGG_EVENT)
2408 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2409 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2410 }
2411 bnapi->events = 0;
2412 }
2413
2414 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2415 int budget)
2416 {
2417 struct bnxt_napi *bnapi = cpr->bnapi;
2418 int rx_pkts;
2419
2420 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2421
2422 /* ACK completion ring before freeing tx ring and producing new
2423 * buffers in rx/agg rings to prevent overflowing the completion
2424 * ring.
2425 */
2426 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2427
2428 __bnxt_poll_work_done(bp, bnapi);
2429 return rx_pkts;
2430 }
2431
2432 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2433 {
2434 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2435 struct bnxt *bp = bnapi->bp;
2436 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2437 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2438 struct tx_cmp *txcmp;
2439 struct rx_cmp_ext *rxcmp1;
2440 u32 cp_cons, tmp_raw_cons;
2441 u32 raw_cons = cpr->cp_raw_cons;
2442 u32 rx_pkts = 0;
2443 u8 event = 0;
2444
2445 while (1) {
2446 int rc;
2447
2448 cp_cons = RING_CMP(raw_cons);
2449 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2450
2451 if (!TX_CMP_VALID(txcmp, raw_cons))
2452 break;
2453
2454 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2455 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2456 cp_cons = RING_CMP(tmp_raw_cons);
2457 rxcmp1 = (struct rx_cmp_ext *)
2458 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2459
2460 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2461 break;
2462
2463 /* force an error to recycle the buffer */
2464 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2465 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2466
2467 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2468 if (likely(rc == -EIO) && budget)
2469 rx_pkts++;
2470 else if (rc == -EBUSY) /* partial completion */
2471 break;
2472 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2473 CMPL_BASE_TYPE_HWRM_DONE)) {
2474 bnxt_hwrm_handler(bp, txcmp);
2475 } else {
2476 netdev_err(bp->dev,
2477 "Invalid completion received on special ring\n");
2478 }
2479 raw_cons = NEXT_RAW_CMP(raw_cons);
2480
2481 if (rx_pkts == budget)
2482 break;
2483 }
2484
2485 cpr->cp_raw_cons = raw_cons;
2486 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2487 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2488
2489 if (event & BNXT_AGG_EVENT)
2490 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2491
2492 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2493 napi_complete_done(napi, rx_pkts);
2494 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2495 }
2496 return rx_pkts;
2497 }
2498
2499 static int bnxt_poll(struct napi_struct *napi, int budget)
2500 {
2501 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2502 struct bnxt *bp = bnapi->bp;
2503 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2504 int work_done = 0;
2505
2506 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2507 napi_complete(napi);
2508 return 0;
2509 }
2510 while (1) {
2511 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2512
2513 if (work_done >= budget) {
2514 if (!budget)
2515 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2516 break;
2517 }
2518
2519 if (!bnxt_has_work(bp, cpr)) {
2520 if (napi_complete_done(napi, work_done))
2521 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2522 break;
2523 }
2524 }
2525 if (bp->flags & BNXT_FLAG_DIM) {
2526 struct dim_sample dim_sample = {};
2527
2528 dim_update_sample(cpr->event_ctr,
2529 cpr->rx_packets,
2530 cpr->rx_bytes,
2531 &dim_sample);
2532 net_dim(&cpr->dim, dim_sample);
2533 }
2534 return work_done;
2535 }
2536
2537 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2538 {
2539 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2540 int i, work_done = 0;
2541
2542 for (i = 0; i < 2; i++) {
2543 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2544
2545 if (cpr2) {
2546 work_done += __bnxt_poll_work(bp, cpr2,
2547 budget - work_done);
2548 cpr->has_more_work |= cpr2->has_more_work;
2549 }
2550 }
2551 return work_done;
2552 }
2553
2554 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2555 u64 dbr_type)
2556 {
2557 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2558 int i;
2559
2560 for (i = 0; i < 2; i++) {
2561 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2562 struct bnxt_db_info *db;
2563
2564 if (cpr2 && cpr2->had_work_done) {
2565 db = &cpr2->cp_db;
2566 writeq(db->db_key64 | dbr_type |
2567 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2568 cpr2->had_work_done = 0;
2569 }
2570 }
2571 __bnxt_poll_work_done(bp, bnapi);
2572 }
2573
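/* NAPI poll handler for P5 chips, which use a notification queue whose
 * entries point at the RX/TX completion sub-rings that need servicing.
 */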
2574 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2575 {
2576 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2577 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2578 u32 raw_cons = cpr->cp_raw_cons;
2579 struct bnxt *bp = bnapi->bp;
2580 struct nqe_cn *nqcmp;
2581 int work_done = 0;
2582 u32 cons;
2583
2584 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2585 napi_complete(napi);
2586 return 0;
2587 }
2588 if (cpr->has_more_work) {
2589 cpr->has_more_work = 0;
2590 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2591 }
2592 while (1) {
2593 cons = RING_CMP(raw_cons);
2594 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2595
2596 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2597 if (cpr->has_more_work)
2598 break;
2599
2600 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2601 cpr->cp_raw_cons = raw_cons;
2602 if (napi_complete_done(napi, work_done))
2603 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2604 cpr->cp_raw_cons);
2605 return work_done;
2606 }
2607
2608 /* The valid test of the entry must be done before
2609 * reading any further.
2610 */
2611 dma_rmb();
2612
2613 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2614 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2615 struct bnxt_cp_ring_info *cpr2;
2616
2617 cpr2 = cpr->cp_ring_arr[idx];
2618 work_done += __bnxt_poll_work(bp, cpr2,
2619 budget - work_done);
2620 cpr->has_more_work |= cpr2->has_more_work;
2621 } else {
2622 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2623 }
2624 raw_cons = NEXT_RAW_CMP(raw_cons);
2625 }
2626 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2627 if (raw_cons != cpr->cp_raw_cons) {
2628 cpr->cp_raw_cons = raw_cons;
2629 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2630 }
2631 return work_done;
2632 }
2633
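/* Unmap and free all pending TX buffers (skbs and XDP frames) on every
 * TX ring.
 */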
2634 static void bnxt_free_tx_skbs(struct bnxt *bp)
2635 {
2636 int i, max_idx;
2637 struct pci_dev *pdev = bp->pdev;
2638
2639 if (!bp->tx_ring)
2640 return;
2641
2642 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2643 for (i = 0; i < bp->tx_nr_rings; i++) {
2644 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2645 int j;
2646
2647 for (j = 0; j < max_idx;) {
2648 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2649 struct sk_buff *skb;
2650 int k, last;
2651
2652 if (i < bp->tx_nr_rings_xdp &&
2653 tx_buf->action == XDP_REDIRECT) {
2654 dma_unmap_single(&pdev->dev,
2655 dma_unmap_addr(tx_buf, mapping),
2656 dma_unmap_len(tx_buf, len),
2657 PCI_DMA_TODEVICE);
2658 xdp_return_frame(tx_buf->xdpf);
2659 tx_buf->action = 0;
2660 tx_buf->xdpf = NULL;
2661 j++;
2662 continue;
2663 }
2664
2665 skb = tx_buf->skb;
2666 if (!skb) {
2667 j++;
2668 continue;
2669 }
2670
2671 tx_buf->skb = NULL;
2672
2673 if (tx_buf->is_push) {
2674 dev_kfree_skb(skb);
2675 j += 2;
2676 continue;
2677 }
2678
2679 dma_unmap_single(&pdev->dev,
2680 dma_unmap_addr(tx_buf, mapping),
2681 skb_headlen(skb),
2682 PCI_DMA_TODEVICE);
2683
2684 last = tx_buf->nr_frags;
2685 j += 2;
2686 for (k = 0; k < last; k++, j++) {
2687 int ring_idx = j & bp->tx_ring_mask;
2688 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2689
2690 tx_buf = &txr->tx_buf_ring[ring_idx];
2691 dma_unmap_page(
2692 &pdev->dev,
2693 dma_unmap_addr(tx_buf, mapping),
2694 skb_frag_size(frag), PCI_DMA_TODEVICE);
2695 }
2696 dev_kfree_skb(skb);
2697 }
2698 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2699 }
2700 }
2701
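/* Unmap and free all RX, aggregation and TPA buffers on one RX ring. */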
2702 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2703 {
2704 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2705 struct pci_dev *pdev = bp->pdev;
2706 struct bnxt_tpa_idx_map *map;
2707 int i, max_idx, max_agg_idx;
2708
2709 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2710 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2711 if (!rxr->rx_tpa)
2712 goto skip_rx_tpa_free;
2713
2714 for (i = 0; i < bp->max_tpa; i++) {
2715 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2716 u8 *data = tpa_info->data;
2717
2718 if (!data)
2719 continue;
2720
2721 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2722 bp->rx_buf_use_size, bp->rx_dir,
2723 DMA_ATTR_WEAK_ORDERING);
2724
2725 tpa_info->data = NULL;
2726
2727 kfree(data);
2728 }
2729
2730 skip_rx_tpa_free:
2731 for (i = 0; i < max_idx; i++) {
2732 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2733 dma_addr_t mapping = rx_buf->mapping;
2734 void *data = rx_buf->data;
2735
2736 if (!data)
2737 continue;
2738
2739 rx_buf->data = NULL;
2740 if (BNXT_RX_PAGE_MODE(bp)) {
2741 mapping -= bp->rx_dma_offset;
2742 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2743 bp->rx_dir,
2744 DMA_ATTR_WEAK_ORDERING);
2745 page_pool_recycle_direct(rxr->page_pool, data);
2746 } else {
2747 dma_unmap_single_attrs(&pdev->dev, mapping,
2748 bp->rx_buf_use_size, bp->rx_dir,
2749 DMA_ATTR_WEAK_ORDERING);
2750 kfree(data);
2751 }
2752 }
2753 for (i = 0; i < max_agg_idx; i++) {
2754 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2755 struct page *page = rx_agg_buf->page;
2756
2757 if (!page)
2758 continue;
2759
2760 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2761 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
2762 DMA_ATTR_WEAK_ORDERING);
2763
2764 rx_agg_buf->page = NULL;
2765 __clear_bit(i, rxr->rx_agg_bmap);
2766
2767 __free_page(page);
2768 }
2769 if (rxr->rx_page) {
2770 __free_page(rxr->rx_page);
2771 rxr->rx_page = NULL;
2772 }
2773 map = rxr->rx_tpa_idx_map;
2774 if (map)
2775 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2776 }
2777
2778 static void bnxt_free_rx_skbs(struct bnxt *bp)
2779 {
2780 int i;
2781
2782 if (!bp->rx_ring)
2783 return;
2784
2785 for (i = 0; i < bp->rx_nr_rings; i++)
2786 bnxt_free_one_rx_ring_skbs(bp, i);
2787 }
2788
2789 static void bnxt_free_skbs(struct bnxt *bp)
2790 {
2791 bnxt_free_tx_skbs(bp);
2792 bnxt_free_rx_skbs(bp);
2793 }
2794
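/* Pre-initialize context memory with the firmware-specified init value,
 * either filling the whole block or writing the value at a fixed offset
 * within each record.
 */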
2795 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2796 {
2797 u8 init_val = mem_init->init_val;
2798 u16 offset = mem_init->offset;
2799 u8 *p2 = p;
2800 int i;
2801
2802 if (!init_val)
2803 return;
2804 if (offset == BNXT_MEM_INVALID_OFFSET) {
2805 memset(p, init_val, len);
2806 return;
2807 }
2808 for (i = 0; i < len; i += mem_init->size)
2809 *(p2 + i + offset) = init_val;
2810 }
2811
2812 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2813 {
2814 struct pci_dev *pdev = bp->pdev;
2815 int i;
2816
2817 for (i = 0; i < rmem->nr_pages; i++) {
2818 if (!rmem->pg_arr[i])
2819 continue;
2820
2821 dma_free_coherent(&pdev->dev, rmem->page_size,
2822 rmem->pg_arr[i], rmem->dma_arr[i]);
2823
2824 rmem->pg_arr[i] = NULL;
2825 }
2826 if (rmem->pg_tbl) {
2827 size_t pg_tbl_size = rmem->nr_pages * 8;
2828
2829 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2830 pg_tbl_size = rmem->page_size;
2831 dma_free_coherent(&pdev->dev, pg_tbl_size,
2832 rmem->pg_tbl, rmem->pg_tbl_map);
2833 rmem->pg_tbl = NULL;
2834 }
2835 if (rmem->vmem_size && *rmem->vmem) {
2836 vfree(*rmem->vmem);
2837 *rmem->vmem = NULL;
2838 }
2839 }
2840
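/* Allocate the DMA-coherent pages backing a hardware ring, the page
 * table pointing at them when more than one page (or an indirect level)
 * is used, and the optional software shadow ring (vmem).
 */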
2841 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2842 {
2843 struct pci_dev *pdev = bp->pdev;
2844 u64 valid_bit = 0;
2845 int i;
2846
2847 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2848 valid_bit = PTU_PTE_VALID;
2849 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2850 size_t pg_tbl_size = rmem->nr_pages * 8;
2851
2852 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2853 pg_tbl_size = rmem->page_size;
2854 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2855 &rmem->pg_tbl_map,
2856 GFP_KERNEL);
2857 if (!rmem->pg_tbl)
2858 return -ENOMEM;
2859 }
2860
2861 for (i = 0; i < rmem->nr_pages; i++) {
2862 u64 extra_bits = valid_bit;
2863
2864 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2865 rmem->page_size,
2866 &rmem->dma_arr[i],
2867 GFP_KERNEL);
2868 if (!rmem->pg_arr[i])
2869 return -ENOMEM;
2870
2871 if (rmem->mem_init)
2872 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2873 rmem->page_size);
2874 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2875 if (i == rmem->nr_pages - 2 &&
2876 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2877 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2878 else if (i == rmem->nr_pages - 1 &&
2879 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2880 extra_bits |= PTU_PTE_LAST;
2881 rmem->pg_tbl[i] =
2882 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2883 }
2884 }
2885
2886 if (rmem->vmem_size) {
2887 *rmem->vmem = vzalloc(rmem->vmem_size);
2888 if (!(*rmem->vmem))
2889 return -ENOMEM;
2890 }
2891 return 0;
2892 }
2893
2894 static void bnxt_free_tpa_info(struct bnxt *bp)
2895 {
2896 int i;
2897
2898 for (i = 0; i < bp->rx_nr_rings; i++) {
2899 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2900
2901 kfree(rxr->rx_tpa_idx_map);
2902 rxr->rx_tpa_idx_map = NULL;
2903 if (rxr->rx_tpa) {
2904 kfree(rxr->rx_tpa[0].agg_arr);
2905 rxr->rx_tpa[0].agg_arr = NULL;
2906 }
2907 kfree(rxr->rx_tpa);
2908 rxr->rx_tpa = NULL;
2909 }
2910 }
2911
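/* Allocate per-ring TPA tracking structures.  P5 chips additionally need
 * an aggregation array per TPA slot and an aggregation ID map.
 */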
2912 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2913 {
2914 int i, j, total_aggs = 0;
2915
2916 bp->max_tpa = MAX_TPA;
2917 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2918 if (!bp->max_tpa_v2)
2919 return 0;
2920 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2921 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2922 }
2923
2924 for (i = 0; i < bp->rx_nr_rings; i++) {
2925 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2926 struct rx_agg_cmp *agg;
2927
2928 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2929 GFP_KERNEL);
2930 if (!rxr->rx_tpa)
2931 return -ENOMEM;
2932
2933 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2934 continue;
2935 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2936 rxr->rx_tpa[0].agg_arr = agg;
2937 if (!agg)
2938 return -ENOMEM;
2939 for (j = 1; j < bp->max_tpa; j++)
2940 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2941 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2942 GFP_KERNEL);
2943 if (!rxr->rx_tpa_idx_map)
2944 return -ENOMEM;
2945 }
2946 return 0;
2947 }
2948
2949 static void bnxt_free_rx_rings(struct bnxt *bp)
2950 {
2951 int i;
2952
2953 if (!bp->rx_ring)
2954 return;
2955
2956 bnxt_free_tpa_info(bp);
2957 for (i = 0; i < bp->rx_nr_rings; i++) {
2958 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2959 struct bnxt_ring_struct *ring;
2960
2961 if (rxr->xdp_prog)
2962 bpf_prog_put(rxr->xdp_prog);
2963
2964 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2965 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2966
2967 page_pool_destroy(rxr->page_pool);
2968 rxr->page_pool = NULL;
2969
2970 kfree(rxr->rx_agg_bmap);
2971 rxr->rx_agg_bmap = NULL;
2972
2973 ring = &rxr->rx_ring_struct;
2974 bnxt_free_ring(bp, &ring->ring_mem);
2975
2976 ring = &rxr->rx_agg_ring_struct;
2977 bnxt_free_ring(bp, &ring->ring_mem);
2978 }
2979 }
2980
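/* Create the page_pool that supplies RX buffer pages for one RX ring. */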
2981 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2982 struct bnxt_rx_ring_info *rxr)
2983 {
2984 struct page_pool_params pp = { 0 };
2985
2986 pp.pool_size = bp->rx_ring_size;
2987 pp.nid = dev_to_node(&bp->pdev->dev);
2988 pp.dev = &bp->pdev->dev;
2989 pp.dma_dir = DMA_BIDIRECTIONAL;
2990
2991 rxr->page_pool = page_pool_create(&pp);
2992 if (IS_ERR(rxr->page_pool)) {
2993 int err = PTR_ERR(rxr->page_pool);
2994
2995 rxr->page_pool = NULL;
2996 return err;
2997 }
2998 return 0;
2999 }
3000
3001 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3002 {
3003 int i, rc = 0, agg_rings = 0;
3004
3005 if (!bp->rx_ring)
3006 return -ENOMEM;
3007
3008 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3009 agg_rings = 1;
3010
3011 for (i = 0; i < bp->rx_nr_rings; i++) {
3012 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3013 struct bnxt_ring_struct *ring;
3014
3015 ring = &rxr->rx_ring_struct;
3016
3017 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3018 if (rc)
3019 return rc;
3020
3021 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3022 if (rc < 0)
3023 return rc;
3024
3025 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3026 MEM_TYPE_PAGE_POOL,
3027 rxr->page_pool);
3028 if (rc) {
3029 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3030 return rc;
3031 }
3032
3033 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3034 if (rc)
3035 return rc;
3036
3037 ring->grp_idx = i;
3038 if (agg_rings) {
3039 u16 mem_size;
3040
3041 ring = &rxr->rx_agg_ring_struct;
3042 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3043 if (rc)
3044 return rc;
3045
3046 ring->grp_idx = i;
3047 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3048 mem_size = rxr->rx_agg_bmap_size / 8;
3049 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3050 if (!rxr->rx_agg_bmap)
3051 return -ENOMEM;
3052 }
3053 }
3054 if (bp->flags & BNXT_FLAG_TPA)
3055 rc = bnxt_alloc_tpa_info(bp);
3056 return rc;
3057 }
3058
3059 static void bnxt_free_tx_rings(struct bnxt *bp)
3060 {
3061 int i;
3062 struct pci_dev *pdev = bp->pdev;
3063
3064 if (!bp->tx_ring)
3065 return;
3066
3067 for (i = 0; i < bp->tx_nr_rings; i++) {
3068 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3069 struct bnxt_ring_struct *ring;
3070
3071 if (txr->tx_push) {
3072 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3073 txr->tx_push, txr->tx_push_mapping);
3074 txr->tx_push = NULL;
3075 }
3076
3077 ring = &txr->tx_ring_struct;
3078
3079 bnxt_free_ring(bp, &ring->ring_mem);
3080 }
3081 }
3082
3083 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3084 {
3085 int i, j, rc;
3086 struct pci_dev *pdev = bp->pdev;
3087
3088 bp->tx_push_size = 0;
3089 if (bp->tx_push_thresh) {
3090 int push_size;
3091
3092 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3093 bp->tx_push_thresh);
3094
3095 if (push_size > 256) {
3096 push_size = 0;
3097 bp->tx_push_thresh = 0;
3098 }
3099
3100 bp->tx_push_size = push_size;
3101 }
3102
3103 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3104 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3105 struct bnxt_ring_struct *ring;
3106 u8 qidx;
3107
3108 ring = &txr->tx_ring_struct;
3109
3110 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3111 if (rc)
3112 return rc;
3113
3114 ring->grp_idx = txr->bnapi->index;
3115 if (bp->tx_push_size) {
3116 dma_addr_t mapping;
3117
3118 /* One pre-allocated DMA buffer to back up the
3119 * TX push operation.
3120 */
3121 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3122 bp->tx_push_size,
3123 &txr->tx_push_mapping,
3124 GFP_KERNEL);
3125
3126 if (!txr->tx_push)
3127 return -ENOMEM;
3128
3129 mapping = txr->tx_push_mapping +
3130 sizeof(struct tx_push_bd);
3131 txr->data_mapping = cpu_to_le64(mapping);
3132 }
3133 qidx = bp->tc_to_qidx[j];
3134 ring->queue_id = bp->q_info[qidx].queue_id;
3135 if (i < bp->tx_nr_rings_xdp)
3136 continue;
3137 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3138 j++;
3139 }
3140 return 0;
3141 }
3142
3143 static void bnxt_free_cp_rings(struct bnxt *bp)
3144 {
3145 int i;
3146
3147 if (!bp->bnapi)
3148 return;
3149
3150 for (i = 0; i < bp->cp_nr_rings; i++) {
3151 struct bnxt_napi *bnapi = bp->bnapi[i];
3152 struct bnxt_cp_ring_info *cpr;
3153 struct bnxt_ring_struct *ring;
3154 int j;
3155
3156 if (!bnapi)
3157 continue;
3158
3159 cpr = &bnapi->cp_ring;
3160 ring = &cpr->cp_ring_struct;
3161
3162 bnxt_free_ring(bp, &ring->ring_mem);
3163
3164 for (j = 0; j < 2; j++) {
3165 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3166
3167 if (cpr2) {
3168 ring = &cpr2->cp_ring_struct;
3169 bnxt_free_ring(bp, &ring->ring_mem);
3170 kfree(cpr2);
3171 cpr->cp_ring_arr[j] = NULL;
3172 }
3173 }
3174 }
3175 }
3176
3177 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3178 {
3179 struct bnxt_ring_mem_info *rmem;
3180 struct bnxt_ring_struct *ring;
3181 struct bnxt_cp_ring_info *cpr;
3182 int rc;
3183
3184 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3185 if (!cpr)
3186 return NULL;
3187
3188 ring = &cpr->cp_ring_struct;
3189 rmem = &ring->ring_mem;
3190 rmem->nr_pages = bp->cp_nr_pages;
3191 rmem->page_size = HW_CMPD_RING_SIZE;
3192 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3193 rmem->dma_arr = cpr->cp_desc_mapping;
3194 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3195 rc = bnxt_alloc_ring(bp, rmem);
3196 if (rc) {
3197 bnxt_free_ring(bp, rmem);
3198 kfree(cpr);
3199 cpr = NULL;
3200 }
3201 return cpr;
3202 }
3203
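/* Allocate the per-NAPI completion rings and assign their MSI-X map
 * indices.  On P5 chips, separate RX and TX completion sub-rings are
 * also allocated and the main ring serves as the notification queue.
 */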
3204 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3205 {
3206 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3207 int i, rc, ulp_base_vec, ulp_msix;
3208
3209 ulp_msix = bnxt_get_ulp_msix_num(bp);
3210 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3211 for (i = 0; i < bp->cp_nr_rings; i++) {
3212 struct bnxt_napi *bnapi = bp->bnapi[i];
3213 struct bnxt_cp_ring_info *cpr;
3214 struct bnxt_ring_struct *ring;
3215
3216 if (!bnapi)
3217 continue;
3218
3219 cpr = &bnapi->cp_ring;
3220 cpr->bnapi = bnapi;
3221 ring = &cpr->cp_ring_struct;
3222
3223 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3224 if (rc)
3225 return rc;
3226
3227 if (ulp_msix && i >= ulp_base_vec)
3228 ring->map_idx = i + ulp_msix;
3229 else
3230 ring->map_idx = i;
3231
3232 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3233 continue;
3234
3235 if (i < bp->rx_nr_rings) {
3236 struct bnxt_cp_ring_info *cpr2 =
3237 bnxt_alloc_cp_sub_ring(bp);
3238
3239 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3240 if (!cpr2)
3241 return -ENOMEM;
3242 cpr2->bnapi = bnapi;
3243 }
3244 if ((sh && i < bp->tx_nr_rings) ||
3245 (!sh && i >= bp->rx_nr_rings)) {
3246 struct bnxt_cp_ring_info *cpr2 =
3247 bnxt_alloc_cp_sub_ring(bp);
3248
3249 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3250 if (!cpr2)
3251 return -ENOMEM;
3252 cpr2->bnapi = bnapi;
3253 }
3254 }
3255 return 0;
3256 }
3257
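/* Fill in the ring_mem descriptors (page counts, page size, backing
 * arrays) for every completion, RX, RX aggregation and TX ring.
 */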
3258 static void bnxt_init_ring_struct(struct bnxt *bp)
3259 {
3260 int i;
3261
3262 for (i = 0; i < bp->cp_nr_rings; i++) {
3263 struct bnxt_napi *bnapi = bp->bnapi[i];
3264 struct bnxt_ring_mem_info *rmem;
3265 struct bnxt_cp_ring_info *cpr;
3266 struct bnxt_rx_ring_info *rxr;
3267 struct bnxt_tx_ring_info *txr;
3268 struct bnxt_ring_struct *ring;
3269
3270 if (!bnapi)
3271 continue;
3272
3273 cpr = &bnapi->cp_ring;
3274 ring = &cpr->cp_ring_struct;
3275 rmem = &ring->ring_mem;
3276 rmem->nr_pages = bp->cp_nr_pages;
3277 rmem->page_size = HW_CMPD_RING_SIZE;
3278 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3279 rmem->dma_arr = cpr->cp_desc_mapping;
3280 rmem->vmem_size = 0;
3281
3282 rxr = bnapi->rx_ring;
3283 if (!rxr)
3284 goto skip_rx;
3285
3286 ring = &rxr->rx_ring_struct;
3287 rmem = &ring->ring_mem;
3288 rmem->nr_pages = bp->rx_nr_pages;
3289 rmem->page_size = HW_RXBD_RING_SIZE;
3290 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3291 rmem->dma_arr = rxr->rx_desc_mapping;
3292 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3293 rmem->vmem = (void **)&rxr->rx_buf_ring;
3294
3295 ring = &rxr->rx_agg_ring_struct;
3296 rmem = &ring->ring_mem;
3297 rmem->nr_pages = bp->rx_agg_nr_pages;
3298 rmem->page_size = HW_RXBD_RING_SIZE;
3299 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3300 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3301 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3302 rmem->vmem = (void **)&rxr->rx_agg_ring;
3303
3304 skip_rx:
3305 txr = bnapi->tx_ring;
3306 if (!txr)
3307 continue;
3308
3309 ring = &txr->tx_ring_struct;
3310 rmem = &ring->ring_mem;
3311 rmem->nr_pages = bp->tx_nr_pages;
3312 rmem->page_size = HW_RXBD_RING_SIZE;
3313 rmem->pg_arr = (void **)txr->tx_desc_ring;
3314 rmem->dma_arr = txr->tx_desc_mapping;
3315 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3316 rmem->vmem = (void **)&txr->tx_buf_ring;
3317 }
3318 }
3319
3320 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3321 {
3322 int i;
3323 u32 prod;
3324 struct rx_bd **rx_buf_ring;
3325
3326 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3327 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3328 int j;
3329 struct rx_bd *rxbd;
3330
3331 rxbd = rx_buf_ring[i];
3332 if (!rxbd)
3333 continue;
3334
3335 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3336 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3337 rxbd->rx_bd_opaque = prod;
3338 }
3339 }
3340 }
3341
3342 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3343 {
3344 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3345 struct net_device *dev = bp->dev;
3346 u32 prod;
3347 int i;
3348
3349 prod = rxr->rx_prod;
3350 for (i = 0; i < bp->rx_ring_size; i++) {
3351 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3352 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3353 ring_nr, i, bp->rx_ring_size);
3354 break;
3355 }
3356 prod = NEXT_RX(prod);
3357 }
3358 rxr->rx_prod = prod;
3359
3360 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3361 return 0;
3362
3363 prod = rxr->rx_agg_prod;
3364 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3365 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3366 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3367 ring_nr, i, bp->rx_agg_ring_size);
3368 break;
3369 }
3370 prod = NEXT_RX_AGG(prod);
3371 }
3372 rxr->rx_agg_prod = prod;
3373
3374 if (rxr->rx_tpa) {
3375 dma_addr_t mapping;
3376 u8 *data;
3377
3378 for (i = 0; i < bp->max_tpa; i++) {
3379 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3380 if (!data)
3381 return -ENOMEM;
3382
3383 rxr->rx_tpa[i].data = data;
3384 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3385 rxr->rx_tpa[i].mapping = mapping;
3386 }
3387 }
3388 return 0;
3389 }
3390
3391 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3392 {
3393 struct bnxt_rx_ring_info *rxr;
3394 struct bnxt_ring_struct *ring;
3395 u32 type;
3396
3397 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3398 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3399
3400 if (NET_IP_ALIGN == 2)
3401 type |= RX_BD_FLAGS_SOP;
3402
3403 rxr = &bp->rx_ring[ring_nr];
3404 ring = &rxr->rx_ring_struct;
3405 bnxt_init_rxbd_pages(ring, type);
3406
3407 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3408 bpf_prog_add(bp->xdp_prog, 1);
3409 rxr->xdp_prog = bp->xdp_prog;
3410 }
3411 ring->fw_ring_id = INVALID_HW_RING_ID;
3412
3413 ring = &rxr->rx_agg_ring_struct;
3414 ring->fw_ring_id = INVALID_HW_RING_ID;
3415
3416 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3417 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3418 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3419
3420 bnxt_init_rxbd_pages(ring, type);
3421 }
3422
3423 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3424 }
3425
3426 static void bnxt_init_cp_rings(struct bnxt *bp)
3427 {
3428 int i, j;
3429
3430 for (i = 0; i < bp->cp_nr_rings; i++) {
3431 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3432 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3433
3434 ring->fw_ring_id = INVALID_HW_RING_ID;
3435 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3436 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3437 for (j = 0; j < 2; j++) {
3438 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3439
3440 if (!cpr2)
3441 continue;
3442
3443 ring = &cpr2->cp_ring_struct;
3444 ring->fw_ring_id = INVALID_HW_RING_ID;
3445 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3446 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3447 }
3448 }
3449 }
3450
3451 static int bnxt_init_rx_rings(struct bnxt *bp)
3452 {
3453 int i, rc = 0;
3454
3455 if (BNXT_RX_PAGE_MODE(bp)) {
3456 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3457 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3458 } else {
3459 bp->rx_offset = BNXT_RX_OFFSET;
3460 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3461 }
3462
3463 for (i = 0; i < bp->rx_nr_rings; i++) {
3464 rc = bnxt_init_one_rx_ring(bp, i);
3465 if (rc)
3466 break;
3467 }
3468
3469 return rc;
3470 }
3471
3472 static int bnxt_init_tx_rings(struct bnxt *bp)
3473 {
3474 u16 i;
3475
3476 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3477 MAX_SKB_FRAGS + 1);
3478
3479 for (i = 0; i < bp->tx_nr_rings; i++) {
3480 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3481 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3482
3483 ring->fw_ring_id = INVALID_HW_RING_ID;
3484 }
3485
3486 return 0;
3487 }
3488
3489 static void bnxt_free_ring_grps(struct bnxt *bp)
3490 {
3491 kfree(bp->grp_info);
3492 bp->grp_info = NULL;
3493 }
3494
3495 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3496 {
3497 int i;
3498
3499 if (irq_re_init) {
3500 bp->grp_info = kcalloc(bp->cp_nr_rings,
3501 sizeof(struct bnxt_ring_grp_info),
3502 GFP_KERNEL);
3503 if (!bp->grp_info)
3504 return -ENOMEM;
3505 }
3506 for (i = 0; i < bp->cp_nr_rings; i++) {
3507 if (irq_re_init)
3508 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3509 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3510 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3511 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3512 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3513 }
3514 return 0;
3515 }
3516
3517 static void bnxt_free_vnics(struct bnxt *bp)
3518 {
3519 kfree(bp->vnic_info);
3520 bp->vnic_info = NULL;
3521 bp->nr_vnics = 0;
3522 }
3523
3524 static int bnxt_alloc_vnics(struct bnxt *bp)
3525 {
3526 int num_vnics = 1;
3527
3528 #ifdef CONFIG_RFS_ACCEL
3529 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3530 num_vnics += bp->rx_nr_rings;
3531 #endif
3532
3533 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3534 num_vnics++;
3535
3536 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3537 GFP_KERNEL);
3538 if (!bp->vnic_info)
3539 return -ENOMEM;
3540
3541 bp->nr_vnics = num_vnics;
3542 return 0;
3543 }
3544
3545 static void bnxt_init_vnics(struct bnxt *bp)
3546 {
3547 int i;
3548
3549 for (i = 0; i < bp->nr_vnics; i++) {
3550 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3551 int j;
3552
3553 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3554 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3555 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3556
3557 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3558
3559 if (bp->vnic_info[i].rss_hash_key) {
3560 if (i == 0)
3561 prandom_bytes(vnic->rss_hash_key,
3562 HW_HASH_KEY_SIZE);
3563 else
3564 memcpy(vnic->rss_hash_key,
3565 bp->vnic_info[0].rss_hash_key,
3566 HW_HASH_KEY_SIZE);
3567 }
3568 }
3569 }
3570
3571 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3572 {
3573 int pages;
3574
3575 pages = ring_size / desc_per_pg;
3576
3577 if (!pages)
3578 return 1;
3579
3580 pages++;
3581
3582 while (pages & (pages - 1))
3583 pages++;
3584
3585 return pages;
3586 }
3587
3588 void bnxt_set_tpa_flags(struct bnxt *bp)
3589 {
3590 bp->flags &= ~BNXT_FLAG_TPA;
3591 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3592 return;
3593 if (bp->dev->features & NETIF_F_LRO)
3594 bp->flags |= BNXT_FLAG_LRO;
3595 else if (bp->dev->features & NETIF_F_GRO_HW)
3596 bp->flags |= BNXT_FLAG_GRO;
3597 }
3598
3599 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3600 * be set on entry.
3601 */
3602 void bnxt_set_ring_params(struct bnxt *bp)
3603 {
3604 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3605 u32 agg_factor = 0, agg_ring_size = 0;
3606
3607 /* 8 for CRC and VLAN */
3608 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3609
3610 rx_space = rx_size + NET_SKB_PAD +
3611 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3612
3613 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3614 ring_size = bp->rx_ring_size;
3615 bp->rx_agg_ring_size = 0;
3616 bp->rx_agg_nr_pages = 0;
3617
3618 if (bp->flags & BNXT_FLAG_TPA)
3619 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3620
3621 bp->flags &= ~BNXT_FLAG_JUMBO;
3622 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3623 u32 jumbo_factor;
3624
3625 bp->flags |= BNXT_FLAG_JUMBO;
3626 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3627 if (jumbo_factor > agg_factor)
3628 agg_factor = jumbo_factor;
3629 }
3630 agg_ring_size = ring_size * agg_factor;
3631
3632 if (agg_ring_size) {
3633 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3634 RX_DESC_CNT);
3635 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3636 u32 tmp = agg_ring_size;
3637
3638 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3639 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3640 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3641 tmp, agg_ring_size);
3642 }
3643 bp->rx_agg_ring_size = agg_ring_size;
3644 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3645 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3646 rx_space = rx_size + NET_SKB_PAD +
3647 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3648 }
3649
3650 bp->rx_buf_use_size = rx_size;
3651 bp->rx_buf_size = rx_space;
3652
3653 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3654 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3655
3656 ring_size = bp->tx_ring_size;
3657 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3658 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3659
3660 max_rx_cmpl = bp->rx_ring_size;
3661 /* MAX TPA needs to be added because TPA_START completions are
3662 * immediately recycled, so the TPA completions are not bound by
3663 * the RX ring size.
3664 */
3665 if (bp->flags & BNXT_FLAG_TPA)
3666 max_rx_cmpl += bp->max_tpa;
3667 /* RX and TPA completions are 32-byte, all others are 16-byte */
3668 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3669 bp->cp_ring_size = ring_size;
3670
3671 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3672 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3673 bp->cp_nr_pages = MAX_CP_PAGES;
3674 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3675 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3676 ring_size, bp->cp_ring_size);
3677 }
3678 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3679 bp->cp_ring_mask = bp->cp_bit - 1;
3680 }
3681
3682 /* Changing allocation mode of RX rings.
3683 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3684 */
3685 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3686 {
3687 if (page_mode) {
3688 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3689 return -EOPNOTSUPP;
3690 bp->dev->max_mtu =
3691 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3692 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3693 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3694 bp->rx_dir = DMA_BIDIRECTIONAL;
3695 bp->rx_skb_func = bnxt_rx_page_skb;
3696 /* Disable LRO or GRO_HW */
3697 netdev_update_features(bp->dev);
3698 } else {
3699 bp->dev->max_mtu = bp->max_mtu;
3700 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3701 bp->rx_dir = DMA_FROM_DEVICE;
3702 bp->rx_skb_func = bnxt_rx_skb;
3703 }
3704 return 0;
3705 }
3706
3707 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3708 {
3709 int i;
3710 struct bnxt_vnic_info *vnic;
3711 struct pci_dev *pdev = bp->pdev;
3712
3713 if (!bp->vnic_info)
3714 return;
3715
3716 for (i = 0; i < bp->nr_vnics; i++) {
3717 vnic = &bp->vnic_info[i];
3718
3719 kfree(vnic->fw_grp_ids);
3720 vnic->fw_grp_ids = NULL;
3721
3722 kfree(vnic->uc_list);
3723 vnic->uc_list = NULL;
3724
3725 if (vnic->mc_list) {
3726 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3727 vnic->mc_list, vnic->mc_list_mapping);
3728 vnic->mc_list = NULL;
3729 }
3730
3731 if (vnic->rss_table) {
3732 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3733 vnic->rss_table,
3734 vnic->rss_table_dma_addr);
3735 vnic->rss_table = NULL;
3736 }
3737
3738 vnic->rss_hash_key = NULL;
3739 vnic->flags = 0;
3740 }
3741 }
3742
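/* Allocate per-VNIC unicast/multicast address lists, the ring group ID
 * array, and the RSS indirection table plus hash key.
 */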
3743 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3744 {
3745 int i, rc = 0, size;
3746 struct bnxt_vnic_info *vnic;
3747 struct pci_dev *pdev = bp->pdev;
3748 int max_rings;
3749
3750 for (i = 0; i < bp->nr_vnics; i++) {
3751 vnic = &bp->vnic_info[i];
3752
3753 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3754 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3755
3756 if (mem_size > 0) {
3757 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3758 if (!vnic->uc_list) {
3759 rc = -ENOMEM;
3760 goto out;
3761 }
3762 }
3763 }
3764
3765 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3766 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3767 vnic->mc_list =
3768 dma_alloc_coherent(&pdev->dev,
3769 vnic->mc_list_size,
3770 &vnic->mc_list_mapping,
3771 GFP_KERNEL);
3772 if (!vnic->mc_list) {
3773 rc = -ENOMEM;
3774 goto out;
3775 }
3776 }
3777
3778 if (bp->flags & BNXT_FLAG_CHIP_P5)
3779 goto vnic_skip_grps;
3780
3781 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3782 max_rings = bp->rx_nr_rings;
3783 else
3784 max_rings = 1;
3785
3786 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3787 if (!vnic->fw_grp_ids) {
3788 rc = -ENOMEM;
3789 goto out;
3790 }
3791 vnic_skip_grps:
3792 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3793 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3794 continue;
3795
3796 /* Allocate rss table and hash key */
3797 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3798 if (bp->flags & BNXT_FLAG_CHIP_P5)
3799 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3800
3801 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3802 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3803 vnic->rss_table_size,
3804 &vnic->rss_table_dma_addr,
3805 GFP_KERNEL);
3806 if (!vnic->rss_table) {
3807 rc = -ENOMEM;
3808 goto out;
3809 }
3810
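/* The hash key is carved out of the same coherent allocation,
 * immediately after the (cache-aligned) RSS table, so a single
 * DMA mapping covers both.
 */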
3811 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3812 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3813 }
3814 return 0;
3815
3816 out:
3817 return rc;
3818 }
3819
3820 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3821 {
3822 struct pci_dev *pdev = bp->pdev;
3823
3824 if (bp->hwrm_cmd_resp_addr) {
3825 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3826 bp->hwrm_cmd_resp_dma_addr);
3827 bp->hwrm_cmd_resp_addr = NULL;
3828 }
3829
3830 if (bp->hwrm_cmd_kong_resp_addr) {
3831 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3832 bp->hwrm_cmd_kong_resp_addr,
3833 bp->hwrm_cmd_kong_resp_dma_addr);
3834 bp->hwrm_cmd_kong_resp_addr = NULL;
3835 }
3836 }
3837
3838 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3839 {
3840 struct pci_dev *pdev = bp->pdev;
3841
3842 if (bp->hwrm_cmd_kong_resp_addr)
3843 return 0;
3844
3845 bp->hwrm_cmd_kong_resp_addr =
3846 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3847 &bp->hwrm_cmd_kong_resp_dma_addr,
3848 GFP_KERNEL);
3849 if (!bp->hwrm_cmd_kong_resp_addr)
3850 return -ENOMEM;
3851
3852 return 0;
3853 }
3854
3855 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3856 {
3857 struct pci_dev *pdev = bp->pdev;
3858
3859 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3860 &bp->hwrm_cmd_resp_dma_addr,
3861 GFP_KERNEL);
3862 if (!bp->hwrm_cmd_resp_addr)
3863 return -ENOMEM;
3864
3865 return 0;
3866 }
3867
3868 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3869 {
3870 if (bp->hwrm_short_cmd_req_addr) {
3871 struct pci_dev *pdev = bp->pdev;
3872
3873 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3874 bp->hwrm_short_cmd_req_addr,
3875 bp->hwrm_short_cmd_req_dma_addr);
3876 bp->hwrm_short_cmd_req_addr = NULL;
3877 }
3878 }
3879
3880 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3881 {
3882 struct pci_dev *pdev = bp->pdev;
3883
3884 if (bp->hwrm_short_cmd_req_addr)
3885 return 0;
3886
3887 bp->hwrm_short_cmd_req_addr =
3888 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3889 &bp->hwrm_short_cmd_req_dma_addr,
3890 GFP_KERNEL);
3891 if (!bp->hwrm_short_cmd_req_addr)
3892 return -ENOMEM;
3893
3894 return 0;
3895 }
3896
3897 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3898 {
3899 kfree(stats->hw_masks);
3900 stats->hw_masks = NULL;
3901 kfree(stats->sw_stats);
3902 stats->sw_stats = NULL;
3903 if (stats->hw_stats) {
3904 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3905 stats->hw_stats_map);
3906 stats->hw_stats = NULL;
3907 }
3908 }
3909
3910 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3911 bool alloc_masks)
3912 {
3913 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3914 &stats->hw_stats_map, GFP_KERNEL);
3915 if (!stats->hw_stats)
3916 return -ENOMEM;
3917
3918 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
3919 if (!stats->sw_stats)
3920 goto stats_mem_err;
3921
3922 if (alloc_masks) {
3923 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
3924 if (!stats->hw_masks)
3925 goto stats_mem_err;
3926 }
3927 return 0;
3928
3929 stats_mem_err:
3930 bnxt_free_stats_mem(bp, stats);
3931 return -ENOMEM;
3932 }
3933
3934 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
3935 {
3936 int i;
3937
3938 for (i = 0; i < count; i++)
3939 mask_arr[i] = mask;
3940 }
3941
3942 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
3943 {
3944 int i;
3945
3946 for (i = 0; i < count; i++)
3947 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
3948 }
3949
3950 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3951 struct bnxt_stats_mem *stats)
3952 {
3953 struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3954 struct hwrm_func_qstats_ext_input req = {0};
3955 __le64 *hw_masks;
3956 int rc;
3957
3958 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3959 !(bp->flags & BNXT_FLAG_CHIP_P5))
3960 return -EOPNOTSUPP;
3961
3962 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
3963 req.fid = cpu_to_le16(0xffff);
3964 req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3965 mutex_lock(&bp->hwrm_cmd_lock);
3966 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3967 if (rc)
3968 goto qstat_exit;
3969
3970 hw_masks = &resp->rx_ucast_pkts;
3971 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
3972
3973 qstat_exit:
3974 mutex_unlock(&bp->hwrm_cmd_lock);
3975 return rc;
3976 }
3977
3978 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
3979 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
3980
3981 static void bnxt_init_stats(struct bnxt *bp)
3982 {
3983 struct bnxt_napi *bnapi = bp->bnapi[0];
3984 struct bnxt_cp_ring_info *cpr;
3985 struct bnxt_stats_mem *stats;
3986 __le64 *rx_stats, *tx_stats;
3987 int rc, rx_count, tx_count;
3988 u64 *rx_masks, *tx_masks;
3989 u64 mask;
3990 u8 flags;
3991
3992 cpr = &bnapi->cp_ring;
3993 stats = &cpr->stats;
3994 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
3995 if (rc) {
3996 if (bp->flags & BNXT_FLAG_CHIP_P5)
3997 mask = (1ULL << 48) - 1;
3998 else
3999 mask = -1ULL;
4000 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4001 }
4002 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4003 stats = &bp->port_stats;
4004 rx_stats = stats->hw_stats;
4005 rx_masks = stats->hw_masks;
4006 rx_count = sizeof(struct rx_port_stats) / 8;
4007 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4008 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4009 tx_count = sizeof(struct tx_port_stats) / 8;
4010
4011 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4012 rc = bnxt_hwrm_port_qstats(bp, flags);
4013 if (rc) {
4014 mask = (1ULL << 40) - 1;
4015
4016 bnxt_fill_masks(rx_masks, mask, rx_count);
4017 bnxt_fill_masks(tx_masks, mask, tx_count);
4018 } else {
4019 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4020 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4021 bnxt_hwrm_port_qstats(bp, 0);
4022 }
4023 }
4024 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4025 stats = &bp->rx_port_stats_ext;
4026 rx_stats = stats->hw_stats;
4027 rx_masks = stats->hw_masks;
4028 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4029 stats = &bp->tx_port_stats_ext;
4030 tx_stats = stats->hw_stats;
4031 tx_masks = stats->hw_masks;
4032 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4033
4034 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4035 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4036 if (rc) {
4037 mask = (1ULL << 40) - 1;
4038
4039 bnxt_fill_masks(rx_masks, mask, rx_count);
4040 if (tx_stats)
4041 bnxt_fill_masks(tx_masks, mask, tx_count);
4042 } else {
4043 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4044 if (tx_stats)
4045 bnxt_copy_hw_masks(tx_masks, tx_stats,
4046 tx_count);
4047 bnxt_hwrm_port_qstats_ext(bp, 0);
4048 }
4049 }
4050 }
4051
4052 static void bnxt_free_port_stats(struct bnxt *bp)
4053 {
4054 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4055 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4056
4057 bnxt_free_stats_mem(bp, &bp->port_stats);
4058 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4059 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4060 }
4061
4062 static void bnxt_free_ring_stats(struct bnxt *bp)
4063 {
4064 int i;
4065
4066 if (!bp->bnapi)
4067 return;
4068
4069 for (i = 0; i < bp->cp_nr_rings; i++) {
4070 struct bnxt_napi *bnapi = bp->bnapi[i];
4071 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4072
4073 bnxt_free_stats_mem(bp, &cpr->stats);
4074 }
4075 }
4076
4077 static int bnxt_alloc_stats(struct bnxt *bp)
4078 {
4079 u32 size, i;
4080 int rc;
4081
4082 size = bp->hw_ring_stats_size;
4083
4084 for (i = 0; i < bp->cp_nr_rings; i++) {
4085 struct bnxt_napi *bnapi = bp->bnapi[i];
4086 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4087
4088 cpr->stats.len = size;
4089 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4090 if (rc)
4091 return rc;
4092
4093 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4094 }
4095
4096 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4097 return 0;
4098
4099 if (bp->port_stats.hw_stats)
4100 goto alloc_ext_stats;
4101
4102 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4103 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4104 if (rc)
4105 return rc;
4106
4107 bp->flags |= BNXT_FLAG_PORT_STATS;
4108
4109 alloc_ext_stats:
4110 /* Display extended statistics only if the FW supports them */
4111 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4112 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4113 return 0;
4114
4115 if (bp->rx_port_stats_ext.hw_stats)
4116 goto alloc_tx_ext_stats;
4117
4118 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4119 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4120 /* Extended stats are optional */
4121 if (rc)
4122 return 0;
4123
4124 alloc_tx_ext_stats:
4125 if (bp->tx_port_stats_ext.hw_stats)
4126 return 0;
4127
4128 if (bp->hwrm_spec_code >= 0x10902 ||
4129 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4130 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4131 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4132 /* Extended stats are optional */
4133 if (rc)
4134 return 0;
4135 }
4136 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4137 return 0;
4138 }
4139
4140 static void bnxt_clear_ring_indices(struct bnxt *bp)
4141 {
4142 int i;
4143
4144 if (!bp->bnapi)
4145 return;
4146
4147 for (i = 0; i < bp->cp_nr_rings; i++) {
4148 struct bnxt_napi *bnapi = bp->bnapi[i];
4149 struct bnxt_cp_ring_info *cpr;
4150 struct bnxt_rx_ring_info *rxr;
4151 struct bnxt_tx_ring_info *txr;
4152
4153 if (!bnapi)
4154 continue;
4155
4156 cpr = &bnapi->cp_ring;
4157 cpr->cp_raw_cons = 0;
4158
4159 txr = bnapi->tx_ring;
4160 if (txr) {
4161 txr->tx_prod = 0;
4162 txr->tx_cons = 0;
4163 }
4164
4165 rxr = bnapi->rx_ring;
4166 if (rxr) {
4167 rxr->rx_prod = 0;
4168 rxr->rx_agg_prod = 0;
4169 rxr->rx_sw_agg_prod = 0;
4170 rxr->rx_next_cons = 0;
4171 }
4172 }
4173 }
4174
4175 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4176 {
4177 #ifdef CONFIG_RFS_ACCEL
4178 int i;
4179
4180 /* We are under rtnl_lock and all our NAPIs have been disabled, so
4181 * it is safe to delete the hash table.
4182 */
4183 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4184 struct hlist_head *head;
4185 struct hlist_node *tmp;
4186 struct bnxt_ntuple_filter *fltr;
4187
4188 head = &bp->ntp_fltr_hash_tbl[i];
4189 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4190 hlist_del(&fltr->hash);
4191 kfree(fltr);
4192 }
4193 }
4194 if (irq_reinit) {
4195 kfree(bp->ntp_fltr_bmap);
4196 bp->ntp_fltr_bmap = NULL;
4197 }
4198 bp->ntp_fltr_count = 0;
4199 #endif
4200 }
4201
4202 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4203 {
4204 #ifdef CONFIG_RFS_ACCEL
4205 int i, rc = 0;
4206
4207 if (!(bp->flags & BNXT_FLAG_RFS))
4208 return 0;
4209
4210 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4211 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4212
4213 bp->ntp_fltr_count = 0;
4214 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4215 sizeof(long),
4216 GFP_KERNEL);
4217
4218 if (!bp->ntp_fltr_bmap)
4219 rc = -ENOMEM;
4220
4221 return rc;
4222 #else
4223 return 0;
4224 #endif
4225 }
4226
4227 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4228 {
4229 bnxt_free_vnic_attributes(bp);
4230 bnxt_free_tx_rings(bp);
4231 bnxt_free_rx_rings(bp);
4232 bnxt_free_cp_rings(bp);
4233 bnxt_free_ntp_fltrs(bp, irq_re_init);
4234 if (irq_re_init) {
4235 bnxt_free_ring_stats(bp);
4236 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4237 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4238 bnxt_free_port_stats(bp);
4239 bnxt_free_ring_grps(bp);
4240 bnxt_free_vnics(bp);
4241 kfree(bp->tx_ring_map);
4242 bp->tx_ring_map = NULL;
4243 kfree(bp->tx_ring);
4244 bp->tx_ring = NULL;
4245 kfree(bp->rx_ring);
4246 bp->rx_ring = NULL;
4247 kfree(bp->bnapi);
4248 bp->bnapi = NULL;
4249 } else {
4250 bnxt_clear_ring_indices(bp);
4251 }
4252 }
4253
4254 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4255 {
4256 int i, j, rc, size, arr_size;
4257 void *bnapi;
4258
4259 if (irq_re_init) {
4260 /* Allocate bnapi mem pointer array and mem block for
4261 * all queues
4262 */
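/* The single kzalloc below lays out, back to back (each piece sized
 * to a multiple of an L1 cache line):
 *   [ struct bnxt_napi *ptrs[cp_nr_rings] | bnxt_napi 0 | ... | bnxt_napi N-1 ]
 * bp->bnapi points at the pointer array, and each ptrs[i] is then
 * fixed up to point into the trailing block of bnxt_napi structs.
 */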
4263 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4264 bp->cp_nr_rings);
4265 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4266 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4267 if (!bnapi)
4268 return -ENOMEM;
4269
4270 bp->bnapi = bnapi;
4271 bnapi += arr_size;
4272 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4273 bp->bnapi[i] = bnapi;
4274 bp->bnapi[i]->index = i;
4275 bp->bnapi[i]->bp = bp;
4276 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4277 struct bnxt_cp_ring_info *cpr =
4278 &bp->bnapi[i]->cp_ring;
4279
4280 cpr->cp_ring_struct.ring_mem.flags =
4281 BNXT_RMEM_RING_PTE_FLAG;
4282 }
4283 }
4284
4285 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4286 sizeof(struct bnxt_rx_ring_info),
4287 GFP_KERNEL);
4288 if (!bp->rx_ring)
4289 return -ENOMEM;
4290
4291 for (i = 0; i < bp->rx_nr_rings; i++) {
4292 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4293
4294 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4295 rxr->rx_ring_struct.ring_mem.flags =
4296 BNXT_RMEM_RING_PTE_FLAG;
4297 rxr->rx_agg_ring_struct.ring_mem.flags =
4298 BNXT_RMEM_RING_PTE_FLAG;
4299 }
4300 rxr->bnapi = bp->bnapi[i];
4301 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4302 }
4303
4304 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4305 sizeof(struct bnxt_tx_ring_info),
4306 GFP_KERNEL);
4307 if (!bp->tx_ring)
4308 return -ENOMEM;
4309
4310 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4311 GFP_KERNEL);
4312
4313 if (!bp->tx_ring_map)
4314 return -ENOMEM;
4315
4316 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4317 j = 0;
4318 else
4319 j = bp->rx_nr_rings;
4320
4321 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4322 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4323
4324 if (bp->flags & BNXT_FLAG_CHIP_P5)
4325 txr->tx_ring_struct.ring_mem.flags =
4326 BNXT_RMEM_RING_PTE_FLAG;
4327 txr->bnapi = bp->bnapi[j];
4328 bp->bnapi[j]->tx_ring = txr;
4329 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4330 if (i >= bp->tx_nr_rings_xdp) {
4331 txr->txq_index = i - bp->tx_nr_rings_xdp;
4332 bp->bnapi[j]->tx_int = bnxt_tx_int;
4333 } else {
4334 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4335 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4336 }
4337 }
4338
4339 rc = bnxt_alloc_stats(bp);
4340 if (rc)
4341 goto alloc_mem_err;
4342 bnxt_init_stats(bp);
4343
4344 rc = bnxt_alloc_ntp_fltrs(bp);
4345 if (rc)
4346 goto alloc_mem_err;
4347
4348 rc = bnxt_alloc_vnics(bp);
4349 if (rc)
4350 goto alloc_mem_err;
4351 }
4352
4353 bnxt_init_ring_struct(bp);
4354
4355 rc = bnxt_alloc_rx_rings(bp);
4356 if (rc)
4357 goto alloc_mem_err;
4358
4359 rc = bnxt_alloc_tx_rings(bp);
4360 if (rc)
4361 goto alloc_mem_err;
4362
4363 rc = bnxt_alloc_cp_rings(bp);
4364 if (rc)
4365 goto alloc_mem_err;
4366
4367 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4368 BNXT_VNIC_UCAST_FLAG;
4369 rc = bnxt_alloc_vnic_attributes(bp);
4370 if (rc)
4371 goto alloc_mem_err;
4372 return 0;
4373
4374 alloc_mem_err:
4375 bnxt_free_mem(bp, true);
4376 return rc;
4377 }
4378
4379 static void bnxt_disable_int(struct bnxt *bp)
4380 {
4381 int i;
4382
4383 if (!bp->bnapi)
4384 return;
4385
4386 for (i = 0; i < bp->cp_nr_rings; i++) {
4387 struct bnxt_napi *bnapi = bp->bnapi[i];
4388 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4389 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4390
4391 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4392 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4393 }
4394 }
4395
4396 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4397 {
4398 struct bnxt_napi *bnapi = bp->bnapi[n];
4399 struct bnxt_cp_ring_info *cpr;
4400
4401 cpr = &bnapi->cp_ring;
4402 return cpr->cp_ring_struct.map_idx;
4403 }
4404
4405 static void bnxt_disable_int_sync(struct bnxt *bp)
4406 {
4407 int i;
4408
4409 if (!bp->irq_tbl)
4410 return;
4411
4412 atomic_inc(&bp->intr_sem);
4413
4414 bnxt_disable_int(bp);
4415 for (i = 0; i < bp->cp_nr_rings; i++) {
4416 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4417
4418 synchronize_irq(bp->irq_tbl[map_idx].vector);
4419 }
4420 }
4421
4422 static void bnxt_enable_int(struct bnxt *bp)
4423 {
4424 int i;
4425
4426 atomic_set(&bp->intr_sem, 0);
4427 for (i = 0; i < bp->cp_nr_rings; i++) {
4428 struct bnxt_napi *bnapi = bp->bnapi[i];
4429 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4430
4431 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4432 }
4433 }
4434
4435 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4436 u16 cmpl_ring, u16 target_id)
4437 {
4438 struct input *req = request;
4439
4440 req->req_type = cpu_to_le16(req_type);
4441 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4442 req->target_id = cpu_to_le16(target_id);
4443 if (bnxt_kong_hwrm_message(bp, req))
4444 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4445 else
4446 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4447 }
4448
4449 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4450 {
4451 switch (hwrm_err) {
4452 case HWRM_ERR_CODE_SUCCESS:
4453 return 0;
4454 case HWRM_ERR_CODE_RESOURCE_LOCKED:
4455 return -EROFS;
4456 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4457 return -EACCES;
4458 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4459 return -ENOSPC;
4460 case HWRM_ERR_CODE_INVALID_PARAMS:
4461 case HWRM_ERR_CODE_INVALID_FLAGS:
4462 case HWRM_ERR_CODE_INVALID_ENABLES:
4463 case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4464 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4465 return -EINVAL;
4466 case HWRM_ERR_CODE_NO_BUFFER:
4467 return -ENOMEM;
4468 case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4469 case HWRM_ERR_CODE_BUSY:
4470 return -EAGAIN;
4471 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4472 return -EOPNOTSUPP;
4473 default:
4474 return -EIO;
4475 }
4476 }
4477
4478 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4479 int timeout, bool silent)
4480 {
4481 int i, intr_process, rc, tmo_count;
4482 struct input *req = msg;
4483 u32 *data = msg;
4484 u8 *valid;
4485 u16 cp_ring_id, len = 0;
4486 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4487 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4488 struct hwrm_short_input short_input = {0};
4489 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4490 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4491 u16 dst = BNXT_HWRM_CHNL_CHIMP;
4492
4493 if (BNXT_NO_FW_ACCESS(bp) &&
4494 le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
4495 return -EBUSY;
4496
4497 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4498 if (msg_len > bp->hwrm_max_ext_req_len ||
4499 !bp->hwrm_short_cmd_req_addr)
4500 return -EINVAL;
4501 }
4502
4503 if (bnxt_hwrm_kong_chnl(bp, req)) {
4504 dst = BNXT_HWRM_CHNL_KONG;
4505 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4506 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4507 resp = bp->hwrm_cmd_kong_resp_addr;
4508 }
4509
4510 memset(resp, 0, PAGE_SIZE);
4511 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4512 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4513
4514 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4515 /* currently supports only one outstanding message */
4516 if (intr_process)
4517 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4518
4519 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4520 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4521 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4522 u16 max_msg_len;
4523
4524 /* Set the boundary for the maximum extended request length for the
4525 * short cmd format. If passed up from the device, use the max
4526 * supported internal req length.
4527 */
4528 max_msg_len = bp->hwrm_max_ext_req_len;
4529
4530 memcpy(short_cmd_req, req, msg_len);
4531 if (msg_len < max_msg_len)
4532 memset(short_cmd_req + msg_len, 0,
4533 max_msg_len - msg_len);
4534
4535 short_input.req_type = req->req_type;
4536 short_input.signature =
4537 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4538 short_input.size = cpu_to_le16(msg_len);
4539 short_input.req_addr =
4540 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4541
4542 data = (u32 *)&short_input;
4543 msg_len = sizeof(short_input);
4544
4545 /* Sync memory write before updating doorbell */
4546 wmb();
4547
4548 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4549 }
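/* In short command mode only the small hwrm_short_input descriptor
 * is written to the communication BAR below; the firmware fetches
 * the full request by DMA from hwrm_short_cmd_req_dma_addr, which is
 * why the original message was copied into that buffer above.
 */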
4550
4551 /* Write request msg to hwrm channel */
4552 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4553
4554 for (i = msg_len; i < max_req_len; i += 4)
4555 writel(0, bp->bar0 + bar_offset + i);
4556
4557 /* Ring channel doorbell */
4558 writel(1, bp->bar0 + doorbell_offset);
4559
4560 if (!pci_is_enabled(bp->pdev))
4561 return -ENODEV;
4562
4563 if (!timeout)
4564 timeout = DFLT_HWRM_CMD_TIMEOUT;
4565 /* Cap the timeout at its upper bound */
4566 timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT);
4567 /* convert timeout to usec */
4568 timeout *= 1000;
4569
4570 i = 0;
4571 /* Short timeout for the first few iterations:
4572 * number of loops = number of loops for short timeout +
4573 * number of loops for standard timeout.
4574 */
4575 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4576 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4577 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
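/* Worked example, assuming the usual bnxt.h defaults (not visible in
 * this file): a 500 ms timeout becomes 500000 usec; after
 * HWRM_SHORT_TIMEOUT_COUNTER (5) short polls of about 3 usec each,
 * the remainder is covered by roughly DIV_ROUND_UP(499985, 25)
 * standard polls of 25-40 usec.
 */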
4578
4579 if (intr_process) {
4580 u16 seq_id = bp->hwrm_intr_seq_id;
4581
4582 /* Wait until hwrm response cmpl interrupt is processed */
4583 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4584 i++ < tmo_count) {
4585 /* Abort the wait for completion if the FW health
4586 * check has failed.
4587 */
4588 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4589 return -EBUSY;
4590 /* on first few passes, just barely sleep */
4591 if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
4592 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4593 HWRM_SHORT_MAX_TIMEOUT);
4594 } else {
4595 if (HWRM_WAIT_MUST_ABORT(bp, req))
4596 break;
4597 usleep_range(HWRM_MIN_TIMEOUT,
4598 HWRM_MAX_TIMEOUT);
4599 }
4600 }
4601
4602 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4603 if (!silent)
4604 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4605 le16_to_cpu(req->req_type));
4606 return -EBUSY;
4607 }
4608 len = le16_to_cpu(resp->resp_len);
4609 valid = ((u8 *)resp) + len - 1;
4610 } else {
4611 int j;
4612
4613 /* Check if response len is updated */
4614 for (i = 0; i < tmo_count; i++) {
4615 /* Abort the wait for completion if the FW health
4616 * check has failed.
4617 */
4618 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4619 return -EBUSY;
4620 len = le16_to_cpu(resp->resp_len);
4621 if (len)
4622 break;
4623 /* on first few passes, just barely sleep */
4624 if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
4625 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4626 HWRM_SHORT_MAX_TIMEOUT);
4627 } else {
4628 if (HWRM_WAIT_MUST_ABORT(bp, req))
4629 goto timeout_abort;
4630 usleep_range(HWRM_MIN_TIMEOUT,
4631 HWRM_MAX_TIMEOUT);
4632 }
4633 }
4634
4635 if (i >= tmo_count) {
4636 timeout_abort:
4637 if (!silent)
4638 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4639 HWRM_TOTAL_TIMEOUT(i),
4640 le16_to_cpu(req->req_type),
4641 le16_to_cpu(req->seq_id), len);
4642 return -EBUSY;
4643 }
4644
4645 /* Last byte of resp contains valid bit */
4646 valid = ((u8 *)resp) + len - 1;
4647 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4648 /* make sure we read from updated DMA memory */
4649 dma_rmb();
4650 if (*valid)
4651 break;
4652 usleep_range(1, 5);
4653 }
4654
4655 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4656 if (!silent)
4657 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4658 HWRM_TOTAL_TIMEOUT(i),
4659 le16_to_cpu(req->req_type),
4660 le16_to_cpu(req->seq_id), len,
4661 *valid);
4662 return -EBUSY;
4663 }
4664 }
4665
4666 /* Zero the valid bit for compatibility. The valid bit in an older
4667 * spec may become a new field in a newer spec. We must make sure
4668 * that a new field not implemented by the old spec reads zero.
4669 */
4670 *valid = 0;
4671 rc = le16_to_cpu(resp->error_code);
4672 if (rc && !silent)
4673 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4674 le16_to_cpu(resp->req_type),
4675 le16_to_cpu(resp->seq_id), rc);
4676 return bnxt_hwrm_to_stderr(rc);
4677 }
4678
4679 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4680 {
4681 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4682 }
4683
4684 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4685 int timeout)
4686 {
4687 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4688 }
4689
4690 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4691 {
4692 int rc;
4693
4694 mutex_lock(&bp->hwrm_cmd_lock);
4695 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4696 mutex_unlock(&bp->hwrm_cmd_lock);
4697 return rc;
4698 }
4699
4700 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4701 int timeout)
4702 {
4703 int rc;
4704
4705 mutex_lock(&bp->hwrm_cmd_lock);
4706 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4707 mutex_unlock(&bp->hwrm_cmd_lock);
4708 return rc;
4709 }
4710
4711 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4712 bool async_only)
4713 {
4714 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4715 struct hwrm_func_drv_rgtr_input req = {0};
4716 DECLARE_BITMAP(async_events_bmap, 256);
4717 u32 *events = (u32 *)async_events_bmap;
4718 u32 flags;
4719 int rc, i;
4720
4721 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4722
4723 req.enables =
4724 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4725 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4726 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4727
4728 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4729 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4730 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4731 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4732 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4733 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4734 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4735 req.flags = cpu_to_le32(flags);
4736 req.ver_maj_8b = DRV_VER_MAJ;
4737 req.ver_min_8b = DRV_VER_MIN;
4738 req.ver_upd_8b = DRV_VER_UPD;
4739 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4740 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4741 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4742
4743 if (BNXT_PF(bp)) {
4744 u32 data[8];
4745 int i;
4746
4747 memset(data, 0, sizeof(data));
4748 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4749 u16 cmd = bnxt_vf_req_snif[i];
4750 unsigned int bit, idx;
4751
4752 idx = cmd / 32;
4753 bit = cmd % 32;
4754 data[idx] |= 1 << bit;
4755 }
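/* e.g. a forwarded command number of 47 would set bit 15 of data[1]
 * (47 / 32 == 1, 47 % 32 == 15); the resulting 256-bit map is copied
 * into the eight little-endian words of req.vf_req_fwd below.
 */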
4756
4757 for (i = 0; i < 8; i++)
4758 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4759
4760 req.enables |=
4761 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4762 }
4763
4764 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4765 req.flags |= cpu_to_le32(
4766 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4767
4768 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4769 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4770 u16 event_id = bnxt_async_events_arr[i];
4771
4772 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4773 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4774 continue;
4775 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4776 }
4777 if (bmap && bmap_size) {
4778 for (i = 0; i < bmap_size; i++) {
4779 if (test_bit(i, bmap))
4780 __set_bit(i, async_events_bmap);
4781 }
4782 }
4783 for (i = 0; i < 8; i++)
4784 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4785
4786 if (async_only)
4787 req.enables =
4788 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4789
4790 mutex_lock(&bp->hwrm_cmd_lock);
4791 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4792 if (!rc) {
4793 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4794 if (resp->flags &
4795 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4796 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4797 }
4798 mutex_unlock(&bp->hwrm_cmd_lock);
4799 return rc;
4800 }
4801
4802 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4803 {
4804 struct hwrm_func_drv_unrgtr_input req = {0};
4805
4806 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4807 return 0;
4808
4809 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4810 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4811 }
4812
4813 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4814 {
4815 u32 rc = 0;
4816 struct hwrm_tunnel_dst_port_free_input req = {0};
4817
4818 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4819 req.tunnel_type = tunnel_type;
4820
4821 switch (tunnel_type) {
4822 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4823 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4824 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4825 break;
4826 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4827 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4828 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4829 break;
4830 default:
4831 break;
4832 }
4833
4834 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4835 if (rc)
4836 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4837 rc);
4838 return rc;
4839 }
4840
4841 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4842 u8 tunnel_type)
4843 {
4844 u32 rc = 0;
4845 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4846 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4847
4848 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4849
4850 req.tunnel_type = tunnel_type;
4851 req.tunnel_dst_port_val = port;
4852
4853 mutex_lock(&bp->hwrm_cmd_lock);
4854 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4855 if (rc) {
4856 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4857 rc);
4858 goto err_out;
4859 }
4860
4861 switch (tunnel_type) {
4862 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4863 bp->vxlan_fw_dst_port_id =
4864 le16_to_cpu(resp->tunnel_dst_port_id);
4865 break;
4866 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4867 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4868 break;
4869 default:
4870 break;
4871 }
4872
4873 err_out:
4874 mutex_unlock(&bp->hwrm_cmd_lock);
4875 return rc;
4876 }
4877
4878 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4879 {
4880 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4881 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4882
4883 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4884 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4885
4886 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4887 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4888 req.mask = cpu_to_le32(vnic->rx_mask);
4889 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4890 }
4891
4892 #ifdef CONFIG_RFS_ACCEL
4893 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4894 struct bnxt_ntuple_filter *fltr)
4895 {
4896 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4897
4898 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4899 req.ntuple_filter_id = fltr->filter_id;
4900 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4901 }
4902
4903 #define BNXT_NTP_FLTR_FLAGS \
4904 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4905 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4906 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4907 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4908 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4909 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4910 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4911 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4912 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4913 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4914 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4915 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4916 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
4917 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4918
4919 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
4920 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4921
4922 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4923 struct bnxt_ntuple_filter *fltr)
4924 {
4925 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4926 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4927 struct flow_keys *keys = &fltr->fkeys;
4928 struct bnxt_vnic_info *vnic;
4929 u32 flags = 0;
4930 int rc = 0;
4931
4932 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4933 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4934
4935 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4936 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4937 req.dst_id = cpu_to_le16(fltr->rxq);
4938 } else {
4939 vnic = &bp->vnic_info[fltr->rxq + 1];
4940 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4941 }
4942 req.flags = cpu_to_le32(flags);
4943 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4944
4945 req.ethertype = htons(ETH_P_IP);
4946 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4947 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4948 req.ip_protocol = keys->basic.ip_proto;
4949
4950 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4951 int i;
4952
4953 req.ethertype = htons(ETH_P_IPV6);
4954 req.ip_addr_type =
4955 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4956 *(struct in6_addr *)&req.src_ipaddr[0] =
4957 keys->addrs.v6addrs.src;
4958 *(struct in6_addr *)&req.dst_ipaddr[0] =
4959 keys->addrs.v6addrs.dst;
4960 for (i = 0; i < 4; i++) {
4961 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4962 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4963 }
4964 } else {
4965 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4966 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4967 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4968 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4969 }
4970 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4971 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4972 req.tunnel_type =
4973 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4974 }
4975
4976 req.src_port = keys->ports.src;
4977 req.src_port_mask = cpu_to_be16(0xffff);
4978 req.dst_port = keys->ports.dst;
4979 req.dst_port_mask = cpu_to_be16(0xffff);
4980
4981 mutex_lock(&bp->hwrm_cmd_lock);
4982 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4983 if (!rc) {
4984 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4985 fltr->filter_id = resp->ntuple_filter_id;
4986 }
4987 mutex_unlock(&bp->hwrm_cmd_lock);
4988 return rc;
4989 }
4990 #endif
4991
4992 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4993 u8 *mac_addr)
4994 {
4995 u32 rc = 0;
4996 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4997 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4998
4999 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
5000 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5001 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5002 req.flags |=
5003 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
5004 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
5005 req.enables =
5006 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5007 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5008 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5009 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
5010 req.l2_addr_mask[0] = 0xff;
5011 req.l2_addr_mask[1] = 0xff;
5012 req.l2_addr_mask[2] = 0xff;
5013 req.l2_addr_mask[3] = 0xff;
5014 req.l2_addr_mask[4] = 0xff;
5015 req.l2_addr_mask[5] = 0xff;
5016
5017 mutex_lock(&bp->hwrm_cmd_lock);
5018 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5019 if (!rc)
5020 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5021 resp->l2_filter_id;
5022 mutex_unlock(&bp->hwrm_cmd_lock);
5023 return rc;
5024 }
5025
5026 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5027 {
5028 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5029 int rc = 0;
5030
5031 /* Any associated ntuple filters will also be cleared by firmware. */
5032 mutex_lock(&bp->hwrm_cmd_lock);
5033 for (i = 0; i < num_of_vnics; i++) {
5034 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5035
5036 for (j = 0; j < vnic->uc_filter_count; j++) {
5037 struct hwrm_cfa_l2_filter_free_input req = {0};
5038
5039 bnxt_hwrm_cmd_hdr_init(bp, &req,
5040 HWRM_CFA_L2_FILTER_FREE, -1, -1);
5041
5042 req.l2_filter_id = vnic->fw_l2_filter_id[j];
5043
5044 rc = _hwrm_send_message(bp, &req, sizeof(req),
5045 HWRM_CMD_TIMEOUT);
5046 }
5047 vnic->uc_filter_count = 0;
5048 }
5049 mutex_unlock(&bp->hwrm_cmd_lock);
5050
5051 return rc;
5052 }
5053
5054 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5055 {
5056 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5057 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
5058 struct hwrm_vnic_tpa_cfg_input req = {0};
5059
5060 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5061 return 0;
5062
5063 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
5064
5065 if (tpa_flags) {
5066 u16 mss = bp->dev->mtu - 40;
5067 u32 nsegs, n, segs = 0, flags;
5068
5069 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5070 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5071 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5072 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5073 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5074 if (tpa_flags & BNXT_FLAG_GRO)
5075 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5076
5077 req.flags = cpu_to_le32(flags);
5078
5079 req.enables =
5080 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5081 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5082 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5083
5084 /* The number of segs is in log2 units, and the first packet is
5085 * not included as part of these units.
5086 */
5087 if (mss <= BNXT_RX_PAGE_SIZE) {
5088 n = BNXT_RX_PAGE_SIZE / mss;
5089 nsegs = (MAX_SKB_FRAGS - 1) * n;
5090 } else {
5091 n = mss / BNXT_RX_PAGE_SIZE;
5092 if (mss & (BNXT_RX_PAGE_SIZE - 1))
5093 n++;
5094 nsegs = (MAX_SKB_FRAGS - n) / n;
5095 }
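/* e.g. assuming 4 KB rx pages and an mss of 1500: n = 4096 / 1500 = 2
 * and nsegs = (MAX_SKB_FRAGS - 1) * 2; on non-P5 chips the value
 * programmed below is then ilog2(nsegs), per the log2 convention
 * noted above.
 */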
5096
5097 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5098 segs = MAX_TPA_SEGS_P5;
5099 max_aggs = bp->max_tpa;
5100 } else {
5101 segs = ilog2(nsegs);
5102 }
5103 req.max_agg_segs = cpu_to_le16(segs);
5104 req.max_aggs = cpu_to_le16(max_aggs);
5105
5106 req.min_agg_len = cpu_to_le32(512);
5107 }
5108 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5109
5110 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5111 }
5112
5113 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5114 {
5115 struct bnxt_ring_grp_info *grp_info;
5116
5117 grp_info = &bp->grp_info[ring->grp_idx];
5118 return grp_info->cp_fw_ring_id;
5119 }
5120
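/* On P5 chips each NAPI's NQ carries separate rx and tx completion
 * rings (cp_ring_arr[BNXT_RX_HDL] / cp_ring_arr[BNXT_TX_HDL]); on
 * older chips the completion ring id comes from the ring group.
 */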
5121 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5122 {
5123 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5124 struct bnxt_napi *bnapi = rxr->bnapi;
5125 struct bnxt_cp_ring_info *cpr;
5126
5127 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5128 return cpr->cp_ring_struct.fw_ring_id;
5129 } else {
5130 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5131 }
5132 }
5133
5134 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5135 {
5136 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5137 struct bnxt_napi *bnapi = txr->bnapi;
5138 struct bnxt_cp_ring_info *cpr;
5139
5140 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5141 return cpr->cp_ring_struct.fw_ring_id;
5142 } else {
5143 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5144 }
5145 }
5146
5147 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5148 {
5149 int entries;
5150
5151 if (bp->flags & BNXT_FLAG_CHIP_P5)
5152 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5153 else
5154 entries = HW_HASH_INDEX_SIZE;
5155
5156 bp->rss_indir_tbl_entries = entries;
5157 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5158 GFP_KERNEL);
5159 if (!bp->rss_indir_tbl)
5160 return -ENOMEM;
5161 return 0;
5162 }
5163
5164 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5165 {
5166 u16 max_rings, max_entries, pad, i;
5167
5168 if (!bp->rx_nr_rings)
5169 return;
5170
5171 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5172 max_rings = bp->rx_nr_rings - 1;
5173 else
5174 max_rings = bp->rx_nr_rings;
5175
5176 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5177
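/* ethtool_rxfh_indir_default(i, n) is simply i % n, so with e.g.
 * 4 rx rings the default table reads 0, 1, 2, 3, 0, 1, 2, 3, ...,
 * spreading flows evenly across the rings.
 */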
5178 for (i = 0; i < max_entries; i++)
5179 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5180
5181 pad = bp->rss_indir_tbl_entries - max_entries;
5182 if (pad)
5183 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5184 }
5185
5186 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5187 {
5188 u16 i, tbl_size, max_ring = 0;
5189
5190 if (!bp->rss_indir_tbl)
5191 return 0;
5192
5193 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5194 for (i = 0; i < tbl_size; i++)
5195 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5196 return max_ring;
5197 }
5198
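/* Returns the number of RSS contexts needed for a given rx ring
 * count.  On P5 chips each context covers BNXT_RSS_TABLE_ENTRIES_P5
 * rings (64 in bnxt.h, assuming the default), so e.g. 72 rx rings
 * would need two contexts.
 */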
5199 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5200 {
5201 if (bp->flags & BNXT_FLAG_CHIP_P5)
5202 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5203 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5204 return 2;
5205 return 1;
5206 }
5207
5208 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5209 {
5210 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5211 u16 i, j;
5212
5213 /* Fill the RSS indirection table with ring group ids */
5214 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5215 if (!no_rss)
5216 j = bp->rss_indir_tbl[i];
5217 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5218 }
5219 }
5220
5221 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5222 struct bnxt_vnic_info *vnic)
5223 {
5224 __le16 *ring_tbl = vnic->rss_table;
5225 struct bnxt_rx_ring_info *rxr;
5226 u16 tbl_size, i;
5227
5228 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5229
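/* On P5 chips each indirection table slot holds a pair of 16-bit
 * ids: the rx ring's fw ring id followed by its companion completion
 * ring id, hence the two writes per loop iteration below.
 */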
5230 for (i = 0; i < tbl_size; i++) {
5231 u16 ring_id, j;
5232
5233 j = bp->rss_indir_tbl[i];
5234 rxr = &bp->rx_ring[j];
5235
5236 ring_id = rxr->rx_ring_struct.fw_ring_id;
5237 *ring_tbl++ = cpu_to_le16(ring_id);
5238 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5239 *ring_tbl++ = cpu_to_le16(ring_id);
5240 }
5241 }
5242
5243 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5244 {
5245 if (bp->flags & BNXT_FLAG_CHIP_P5)
5246 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5247 else
5248 __bnxt_fill_hw_rss_tbl(bp, vnic);
5249 }
5250
5251 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5252 {
5253 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5254 struct hwrm_vnic_rss_cfg_input req = {0};
5255
5256 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5257 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5258 return 0;
5259
5260 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5261 if (set_rss) {
5262 bnxt_fill_hw_rss_tbl(bp, vnic);
5263 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5264 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5265 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5266 req.hash_key_tbl_addr =
5267 cpu_to_le64(vnic->rss_hash_key_dma_addr);
5268 }
5269 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5270 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5271 }
5272
5273 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5274 {
5275 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5276 struct hwrm_vnic_rss_cfg_input req = {0};
5277 dma_addr_t ring_tbl_map;
5278 u32 i, nr_ctxs;
5279
5280 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5281 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5282 if (!set_rss) {
5283 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5284 return 0;
5285 }
5286 bnxt_fill_hw_rss_tbl(bp, vnic);
5287 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5288 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5289 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5290 ring_tbl_map = vnic->rss_table_dma_addr;
5291 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
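/* One VNIC_RSS_CFG message is issued per RSS context; each context
 * consumes one BNXT_RSS_TABLE_SIZE_P5-sized chunk of the DMA'ed ring
 * table, which is why ring_tbl_map advances by that amount per
 * iteration.
 */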
5292 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5293 int rc;
5294
5295 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5296 req.ring_table_pair_index = i;
5297 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5298 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5299 if (rc)
5300 return rc;
5301 }
5302 return 0;
5303 }
5304
5305 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5306 {
5307 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5308 struct hwrm_vnic_plcmodes_cfg_input req = {0};
5309
5310 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5311 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5312 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5313 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5314 req.enables =
5315 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5316 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5317 /* thresholds not implemented in firmware yet */
5318 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5319 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5320 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5321 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5322 }
5323
5324 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5325 u16 ctx_idx)
5326 {
5327 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5328
5329 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5330 req.rss_cos_lb_ctx_id =
5331 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5332
5333 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5334 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5335 }
5336
5337 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5338 {
5339 int i, j;
5340
5341 for (i = 0; i < bp->nr_vnics; i++) {
5342 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5343
5344 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5345 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5346 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5347 }
5348 }
5349 bp->rsscos_nr_ctxs = 0;
5350 }
5351
5352 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5353 {
5354 int rc;
5355 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5356 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5357 bp->hwrm_cmd_resp_addr;
5358
5359 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5360 -1);
5361
5362 mutex_lock(&bp->hwrm_cmd_lock);
5363 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5364 if (!rc)
5365 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5366 le16_to_cpu(resp->rss_cos_lb_ctx_id);
5367 mutex_unlock(&bp->hwrm_cmd_lock);
5368
5369 return rc;
5370 }
5371
5372 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5373 {
5374 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5375 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5376 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5377 }
5378
5379 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5380 {
5381 unsigned int ring = 0, grp_idx;
5382 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5383 struct hwrm_vnic_cfg_input req = {0};
5384 u16 def_vlan = 0;
5385
5386 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5387
5388 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5389 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5390
5391 req.default_rx_ring_id =
5392 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5393 req.default_cmpl_ring_id =
5394 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5395 req.enables =
5396 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5397 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5398 goto vnic_mru;
5399 }
5400 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5401 /* Only RSS is supported for now. TBD: COS & LB */
5402 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5403 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5404 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5405 VNIC_CFG_REQ_ENABLES_MRU);
5406 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5407 req.rss_rule =
5408 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5409 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5410 VNIC_CFG_REQ_ENABLES_MRU);
5411 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5412 } else {
5413 req.rss_rule = cpu_to_le16(0xffff);
5414 }
5415
5416 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5417 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5418 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5419 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5420 } else {
5421 req.cos_rule = cpu_to_le16(0xffff);
5422 }
5423
5424 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5425 ring = 0;
5426 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5427 ring = vnic_id - 1;
5428 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5429 ring = bp->rx_nr_rings - 1;
5430
5431 grp_idx = bp->rx_ring[ring].bnapi->index;
5432 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5433 req.lb_rule = cpu_to_le16(0xffff);
5434 vnic_mru:
5435 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5436
5437 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5438 #ifdef CONFIG_BNXT_SRIOV
5439 if (BNXT_VF(bp))
5440 def_vlan = bp->vf.vlan;
5441 #endif
5442 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5443 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5444 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5445 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5446
5447 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5448 }
5449
5450 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5451 {
5452 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5453 struct hwrm_vnic_free_input req = {0};
5454
5455 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5456 req.vnic_id =
5457 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5458
5459 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5460 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5461 }
5462 }
5463
5464 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5465 {
5466 u16 i;
5467
5468 for (i = 0; i < bp->nr_vnics; i++)
5469 bnxt_hwrm_vnic_free_one(bp, i);
5470 }
5471
5472 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5473 unsigned int start_rx_ring_idx,
5474 unsigned int nr_rings)
5475 {
5476 int rc = 0;
5477 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5478 struct hwrm_vnic_alloc_input req = {0};
5479 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5480 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5481
5482 if (bp->flags & BNXT_FLAG_CHIP_P5)
5483 goto vnic_no_ring_grps;
5484
5485 /* map ring groups to this vnic */
5486 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5487 grp_idx = bp->rx_ring[i].bnapi->index;
5488 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5489 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5490 j, nr_rings);
5491 break;
5492 }
5493 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5494 }
5495
5496 vnic_no_ring_grps:
5497 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5498 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5499 if (vnic_id == 0)
5500 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5501
5502 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5503
5504 mutex_lock(&bp->hwrm_cmd_lock);
5505 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5506 if (!rc)
5507 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5508 mutex_unlock(&bp->hwrm_cmd_lock);
5509 return rc;
5510 }
5511
5512 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5513 {
5514 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5515 struct hwrm_vnic_qcaps_input req = {0};
5516 int rc;
5517
5518 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5519 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5520 if (bp->hwrm_spec_code < 0x10600)
5521 return 0;
5522
5523 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5524 mutex_lock(&bp->hwrm_cmd_lock);
5525 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5526 if (!rc) {
5527 u32 flags = le32_to_cpu(resp->flags);
5528
5529 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5530 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5531 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5532 if (flags &
5533 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5534 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5535
5536 /* Older P5 firmware, before EXT_HW_STATS support, did not set
5537 * VLAN_STRIP_CAP properly.
5538 */
5539 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5540 (BNXT_CHIP_P5_THOR(bp) &&
5541 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5542 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5543 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5544 if (bp->max_tpa_v2) {
5545 if (BNXT_CHIP_P5_THOR(bp))
5546 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5547 else
5548 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5549 }
5550 }
5551 mutex_unlock(&bp->hwrm_cmd_lock);
5552 return rc;
5553 }
5554
5555 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5556 {
5557 u16 i;
5558 u32 rc = 0;
5559
5560 if (bp->flags & BNXT_FLAG_CHIP_P5)
5561 return 0;
5562
5563 mutex_lock(&bp->hwrm_cmd_lock);
5564 for (i = 0; i < bp->rx_nr_rings; i++) {
5565 struct hwrm_ring_grp_alloc_input req = {0};
5566 struct hwrm_ring_grp_alloc_output *resp =
5567 bp->hwrm_cmd_resp_addr;
5568 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5569
5570 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5571
5572 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5573 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5574 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5575 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5576
5577 rc = _hwrm_send_message(bp, &req, sizeof(req),
5578 HWRM_CMD_TIMEOUT);
5579 if (rc)
5580 break;
5581
5582 bp->grp_info[grp_idx].fw_grp_id =
5583 le32_to_cpu(resp->ring_group_id);
5584 }
5585 mutex_unlock(&bp->hwrm_cmd_lock);
5586 return rc;
5587 }
5588
5589 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5590 {
5591 u16 i;
5592 struct hwrm_ring_grp_free_input req = {0};
5593
5594 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5595 return;
5596
5597 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5598
5599 mutex_lock(&bp->hwrm_cmd_lock);
5600 for (i = 0; i < bp->cp_nr_rings; i++) {
5601 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5602 continue;
5603 req.ring_group_id =
5604 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5605
5606 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5607 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5608 }
5609 mutex_unlock(&bp->hwrm_cmd_lock);
5610 }
5611
5612 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5613 struct bnxt_ring_struct *ring,
5614 u32 ring_type, u32 map_index)
5615 {
5616 int rc = 0, err = 0;
5617 struct hwrm_ring_alloc_input req = {0};
5618 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5619 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5620 struct bnxt_ring_grp_info *grp_info;
5621 u16 ring_id;
5622
5623 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5624
5625 req.enables = 0;
5626 if (rmem->nr_pages > 1) {
5627 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5628 /* Page size is in log2 units */
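/* (e.g. a value of 12 here would denote 4 KB pages, i.e. 2^12 bytes) */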
5629 req.page_size = BNXT_PAGE_SHIFT;
5630 req.page_tbl_depth = 1;
5631 } else {
5632 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5633 }
5634 req.fbo = 0;
5635 /* Association of ring index with doorbell index and MSIX number */
5636 req.logical_id = cpu_to_le16(map_index);
5637
5638 switch (ring_type) {
5639 case HWRM_RING_ALLOC_TX: {
5640 struct bnxt_tx_ring_info *txr;
5641
5642 txr = container_of(ring, struct bnxt_tx_ring_info,
5643 tx_ring_struct);
5644 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5645 /* Association of transmit ring with completion ring */
5646 grp_info = &bp->grp_info[ring->grp_idx];
5647 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5648 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5649 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5650 req.queue_id = cpu_to_le16(ring->queue_id);
5651 break;
5652 }
5653 case HWRM_RING_ALLOC_RX:
5654 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5655 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5656 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5657 u16 flags = 0;
5658
5659 /* Association of rx ring with stats context */
5660 grp_info = &bp->grp_info[ring->grp_idx];
5661 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5662 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5663 req.enables |= cpu_to_le32(
5664 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5665 if (NET_IP_ALIGN == 2)
5666 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5667 req.flags = cpu_to_le16(flags);
5668 }
5669 break;
5670 case HWRM_RING_ALLOC_AGG:
5671 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5672 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5673 /* Association of agg ring with rx ring */
5674 grp_info = &bp->grp_info[ring->grp_idx];
5675 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5676 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5677 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5678 req.enables |= cpu_to_le32(
5679 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5680 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5681 } else {
5682 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5683 }
5684 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5685 break;
5686 case HWRM_RING_ALLOC_CMPL:
5687 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5688 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5689 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5690 /* Association of cp ring with nq */
5691 grp_info = &bp->grp_info[map_index];
5692 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5693 req.cq_handle = cpu_to_le64(ring->handle);
5694 req.enables |= cpu_to_le32(
5695 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5696 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5697 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5698 }
5699 break;
5700 case HWRM_RING_ALLOC_NQ:
5701 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5702 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5703 if (bp->flags & BNXT_FLAG_USING_MSIX)
5704 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5705 break;
5706 default:
5707 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5708 ring_type);
5709 return -1;
5710 }
5711
5712 mutex_lock(&bp->hwrm_cmd_lock);
5713 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5714 err = le16_to_cpu(resp->error_code);
5715 ring_id = le16_to_cpu(resp->ring_id);
5716 mutex_unlock(&bp->hwrm_cmd_lock);
5717
5718 if (rc || err) {
5719 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5720 ring_type, rc, err);
5721 return -EIO;
5722 }
5723 ring->fw_ring_id = ring_id;
5724 return rc;
5725 }
5726
5727 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5728 {
5729 int rc;
5730
5731 if (BNXT_PF(bp)) {
5732 struct hwrm_func_cfg_input req = {0};
5733
5734 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5735 req.fid = cpu_to_le16(0xffff);
5736 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5737 req.async_event_cr = cpu_to_le16(idx);
5738 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5739 } else {
5740 struct hwrm_func_vf_cfg_input req = {0};
5741
5742 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5743 req.enables =
5744 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5745 req.async_event_cr = cpu_to_le16(idx);
5746 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5747 }
5748 return rc;
5749 }
5750
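/* Set up the doorbell address and key for a newly allocated ring.  On
 * P5 chips all rings use the fixed PF/VF doorbell offset and a 64-bit
 * key built from the L2 doorbell path, the ring type and the firmware
 * ring id (xid) shifted by DBR_XID_SFT.  Older chips use one 32-bit
 * key doorbell per MSI-X slot at bar1 + map_idx * 0x80.
 */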
5751 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5752 u32 map_idx, u32 xid)
5753 {
5754 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5755 if (BNXT_PF(bp))
5756 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5757 else
5758 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5759 switch (ring_type) {
5760 case HWRM_RING_ALLOC_TX:
5761 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5762 break;
5763 case HWRM_RING_ALLOC_RX:
5764 case HWRM_RING_ALLOC_AGG:
5765 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5766 break;
5767 case HWRM_RING_ALLOC_CMPL:
5768 db->db_key64 = DBR_PATH_L2;
5769 break;
5770 case HWRM_RING_ALLOC_NQ:
5771 db->db_key64 = DBR_PATH_L2;
5772 break;
5773 }
5774 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5775 } else {
5776 db->doorbell = bp->bar1 + map_idx * 0x80;
5777 switch (ring_type) {
5778 case HWRM_RING_ALLOC_TX:
5779 db->db_key32 = DB_KEY_TX;
5780 break;
5781 case HWRM_RING_ALLOC_RX:
5782 case HWRM_RING_ALLOC_AGG:
5783 db->db_key32 = DB_KEY_RX;
5784 break;
5785 case HWRM_RING_ALLOC_CMPL:
5786 db->db_key32 = DB_KEY_CP;
5787 break;
5788 }
5789 }
5790 }
5791
5792 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5793 {
5794 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5795 int i, rc = 0;
5796 u32 type;
5797
5798 if (bp->flags & BNXT_FLAG_CHIP_P5)
5799 type = HWRM_RING_ALLOC_NQ;
5800 else
5801 type = HWRM_RING_ALLOC_CMPL;
5802 for (i = 0; i < bp->cp_nr_rings; i++) {
5803 struct bnxt_napi *bnapi = bp->bnapi[i];
5804 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5805 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5806 u32 map_idx = ring->map_idx;
5807 unsigned int vector;
5808
5809 vector = bp->irq_tbl[map_idx].vector;
5810 disable_irq_nosync(vector);
5811 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5812 if (rc) {
5813 enable_irq(vector);
5814 goto err_out;
5815 }
5816 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5817 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5818 enable_irq(vector);
5819 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5820
5821 if (!i) {
5822 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5823 if (rc)
5824 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5825 }
5826 }
5827
5828 type = HWRM_RING_ALLOC_TX;
5829 for (i = 0; i < bp->tx_nr_rings; i++) {
5830 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5831 struct bnxt_ring_struct *ring;
5832 u32 map_idx;
5833
5834 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5835 struct bnxt_napi *bnapi = txr->bnapi;
5836 struct bnxt_cp_ring_info *cpr, *cpr2;
5837 u32 type2 = HWRM_RING_ALLOC_CMPL;
5838
5839 cpr = &bnapi->cp_ring;
5840 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5841 ring = &cpr2->cp_ring_struct;
5842 ring->handle = BNXT_TX_HDL;
5843 map_idx = bnapi->index;
5844 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5845 if (rc)
5846 goto err_out;
5847 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5848 ring->fw_ring_id);
5849 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5850 }
5851 ring = &txr->tx_ring_struct;
5852 map_idx = i;
5853 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5854 if (rc)
5855 goto err_out;
5856 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5857 }
5858
5859 type = HWRM_RING_ALLOC_RX;
5860 for (i = 0; i < bp->rx_nr_rings; i++) {
5861 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5862 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5863 struct bnxt_napi *bnapi = rxr->bnapi;
5864 u32 map_idx = bnapi->index;
5865
5866 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5867 if (rc)
5868 goto err_out;
5869 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5870 /* If we have agg rings, the agg buffers are posted first below. */
5871 if (!agg_rings)
5872 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5873 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5874 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5875 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5876 u32 type2 = HWRM_RING_ALLOC_CMPL;
5877 struct bnxt_cp_ring_info *cpr2;
5878
5879 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5880 ring = &cpr2->cp_ring_struct;
5881 ring->handle = BNXT_RX_HDL;
5882 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5883 if (rc)
5884 goto err_out;
5885 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5886 ring->fw_ring_id);
5887 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5888 }
5889 }
5890
5891 if (agg_rings) {
5892 type = HWRM_RING_ALLOC_AGG;
5893 for (i = 0; i < bp->rx_nr_rings; i++) {
5894 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5895 struct bnxt_ring_struct *ring =
5896 &rxr->rx_agg_ring_struct;
5897 u32 grp_idx = ring->grp_idx;
5898 u32 map_idx = grp_idx + bp->rx_nr_rings;
5899
5900 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5901 if (rc)
5902 goto err_out;
5903
5904 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5905 ring->fw_ring_id);
5906 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5907 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5908 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5909 }
5910 }
5911 err_out:
5912 return rc;
5913 }
5914
5915 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5916 struct bnxt_ring_struct *ring,
5917 u32 ring_type, int cmpl_ring_id)
5918 {
5919 int rc;
5920 struct hwrm_ring_free_input req = {0};
5921 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5922 u16 error_code;
5923
5924 if (BNXT_NO_FW_ACCESS(bp))
5925 return 0;
5926
5927 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5928 req.ring_type = ring_type;
5929 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5930
5931 mutex_lock(&bp->hwrm_cmd_lock);
5932 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5933 error_code = le16_to_cpu(resp->error_code);
5934 mutex_unlock(&bp->hwrm_cmd_lock);
5935
5936 if (rc || error_code) {
5937 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5938 ring_type, rc, error_code);
5939 return -EIO;
5940 }
5941 return 0;
5942 }
5943
5944 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5945 {
5946 u32 type;
5947 int i;
5948
5949 if (!bp->bnapi)
5950 return;
5951
5952 for (i = 0; i < bp->tx_nr_rings; i++) {
5953 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5954 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5955
5956 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5957 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5958
5959 hwrm_ring_free_send_msg(bp, ring,
5960 RING_FREE_REQ_RING_TYPE_TX,
5961 close_path ? cmpl_ring_id :
5962 INVALID_HW_RING_ID);
5963 ring->fw_ring_id = INVALID_HW_RING_ID;
5964 }
5965 }
5966
5967 for (i = 0; i < bp->rx_nr_rings; i++) {
5968 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5969 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5970 u32 grp_idx = rxr->bnapi->index;
5971
5972 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5973 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5974
5975 hwrm_ring_free_send_msg(bp, ring,
5976 RING_FREE_REQ_RING_TYPE_RX,
5977 close_path ? cmpl_ring_id :
5978 INVALID_HW_RING_ID);
5979 ring->fw_ring_id = INVALID_HW_RING_ID;
5980 bp->grp_info[grp_idx].rx_fw_ring_id =
5981 INVALID_HW_RING_ID;
5982 }
5983 }
5984
5985 if (bp->flags & BNXT_FLAG_CHIP_P5)
5986 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5987 else
5988 type = RING_FREE_REQ_RING_TYPE_RX;
5989 for (i = 0; i < bp->rx_nr_rings; i++) {
5990 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5991 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5992 u32 grp_idx = rxr->bnapi->index;
5993
5994 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5995 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5996
5997 hwrm_ring_free_send_msg(bp, ring, type,
5998 close_path ? cmpl_ring_id :
5999 INVALID_HW_RING_ID);
6000 ring->fw_ring_id = INVALID_HW_RING_ID;
6001 bp->grp_info[grp_idx].agg_fw_ring_id =
6002 INVALID_HW_RING_ID;
6003 }
6004 }
6005
6006 /* The completion rings are about to be freed. After that, the
6007 * IRQ doorbells will no longer work, so we need to disable the
6008 * IRQs here.
6009 */
6010 bnxt_disable_int_sync(bp);
6011
6012 if (bp->flags & BNXT_FLAG_CHIP_P5)
6013 type = RING_FREE_REQ_RING_TYPE_NQ;
6014 else
6015 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
6016 for (i = 0; i < bp->cp_nr_rings; i++) {
6017 struct bnxt_napi *bnapi = bp->bnapi[i];
6018 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6019 struct bnxt_ring_struct *ring;
6020 int j;
6021
6022 for (j = 0; j < 2; j++) {
6023 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
6024
6025 if (cpr2) {
6026 ring = &cpr2->cp_ring_struct;
6027 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6028 continue;
6029 hwrm_ring_free_send_msg(bp, ring,
6030 RING_FREE_REQ_RING_TYPE_L2_CMPL,
6031 INVALID_HW_RING_ID);
6032 ring->fw_ring_id = INVALID_HW_RING_ID;
6033 }
6034 }
6035 ring = &cpr->cp_ring_struct;
6036 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6037 hwrm_ring_free_send_msg(bp, ring, type,
6038 INVALID_HW_RING_ID);
6039 ring->fw_ring_id = INVALID_HW_RING_ID;
6040 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
6041 }
6042 }
6043 }
6044
6045 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6046 bool shared);
6047
6048 static int bnxt_hwrm_get_rings(struct bnxt *bp)
6049 {
6050 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6051 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6052 struct hwrm_func_qcfg_input req = {0};
6053 int rc;
6054
6055 if (bp->hwrm_spec_code < 0x10601)
6056 return 0;
6057
6058 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6059 req.fid = cpu_to_le16(0xffff);
6060 mutex_lock(&bp->hwrm_cmd_lock);
6061 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6062 if (rc) {
6063 mutex_unlock(&bp->hwrm_cmd_lock);
6064 return rc;
6065 }
6066
6067 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6068 if (BNXT_NEW_RM(bp)) {
6069 u16 cp, stats;
6070
6071 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6072 hw_resc->resv_hw_ring_grps =
6073 le32_to_cpu(resp->alloc_hw_ring_grps);
6074 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6075 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6076 stats = le16_to_cpu(resp->alloc_stat_ctx);
6077 hw_resc->resv_irqs = cp;
6078 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6079 int rx = hw_resc->resv_rx_rings;
6080 int tx = hw_resc->resv_tx_rings;
6081
6082 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6083 rx >>= 1;
6084 if (cp < (rx + tx)) {
6085 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6086 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6087 rx <<= 1;
6088 hw_resc->resv_rx_rings = rx;
6089 hw_resc->resv_tx_rings = tx;
6090 }
6091 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6092 hw_resc->resv_hw_ring_grps = rx;
6093 }
6094 hw_resc->resv_cp_rings = cp;
6095 hw_resc->resv_stat_ctxs = stats;
6096 }
6097 mutex_unlock(&bp->hwrm_cmd_lock);
6098 return 0;
6099 }
6100
6101 /* Caller must hold bp->hwrm_cmd_lock */
6102 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6103 {
6104 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6105 struct hwrm_func_qcfg_input req = {0};
6106 int rc;
6107
6108 if (bp->hwrm_spec_code < 0x10601)
6109 return 0;
6110
6111 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6112 req.fid = cpu_to_le16(fid);
6113 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6114 if (!rc)
6115 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6116
6117 return rc;
6118 }
6119
6120 static bool bnxt_rfs_supported(struct bnxt *bp);
6121
6122 static void
6123 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
6124 int tx_rings, int rx_rings, int ring_grps,
6125 int cp_rings, int stats, int vnics)
6126 {
6127 u32 enables = 0;
6128
6129 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
6130 req->fid = cpu_to_le16(0xffff);
6131 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6132 req->num_tx_rings = cpu_to_le16(tx_rings);
6133 if (BNXT_NEW_RM(bp)) {
6134 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6135 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6136 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6137 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6138 enables |= tx_rings + ring_grps ?
6139 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6140 enables |= rx_rings ?
6141 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6142 } else {
6143 enables |= cp_rings ?
6144 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6145 enables |= ring_grps ?
6146 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6147 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6148 }
6149 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6150
6151 req->num_rx_rings = cpu_to_le16(rx_rings);
6152 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6153 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6154 req->num_msix = cpu_to_le16(cp_rings);
6155 req->num_rsscos_ctxs =
6156 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6157 } else {
6158 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6159 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6160 req->num_rsscos_ctxs = cpu_to_le16(1);
6161 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6162 bnxt_rfs_supported(bp))
6163 req->num_rsscos_ctxs =
6164 cpu_to_le16(ring_grps + 1);
6165 }
6166 req->num_stat_ctxs = cpu_to_le16(stats);
6167 req->num_vnics = cpu_to_le16(vnics);
6168 }
6169 req->enables = cpu_to_le32(enables);
6170 }
6171
6172 static void
6173 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
6174 struct hwrm_func_vf_cfg_input *req, int tx_rings,
6175 int rx_rings, int ring_grps, int cp_rings,
6176 int stats, int vnics)
6177 {
6178 u32 enables = 0;
6179
6180 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
6181 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6182 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6183 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6184 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6185 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6186 enables |= tx_rings + ring_grps ?
6187 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6188 } else {
6189 enables |= cp_rings ?
6190 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6191 enables |= ring_grps ?
6192 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6193 }
6194 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6195 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6196
6197 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6198 req->num_tx_rings = cpu_to_le16(tx_rings);
6199 req->num_rx_rings = cpu_to_le16(rx_rings);
6200 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6201 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6202 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6203 } else {
6204 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6205 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6206 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6207 }
6208 req->num_stat_ctxs = cpu_to_le16(stats);
6209 req->num_vnics = cpu_to_le16(vnics);
6210
6211 req->enables = cpu_to_le32(enables);
6212 }
6213
6214 static int
6215 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6216 int ring_grps, int cp_rings, int stats, int vnics)
6217 {
6218 struct hwrm_func_cfg_input req = {0};
6219 int rc;
6220
6221 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6222 cp_rings, stats, vnics);
6223 if (!req.enables)
6224 return 0;
6225
6226 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6227 if (rc)
6228 return rc;
6229
6230 if (bp->hwrm_spec_code < 0x10601)
6231 bp->hw_resc.resv_tx_rings = tx_rings;
6232
6233 return bnxt_hwrm_get_rings(bp);
6234 }
6235
6236 static int
6237 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6238 int ring_grps, int cp_rings, int stats, int vnics)
6239 {
6240 struct hwrm_func_vf_cfg_input req = {0};
6241 int rc;
6242
6243 if (!BNXT_NEW_RM(bp)) {
6244 bp->hw_resc.resv_tx_rings = tx_rings;
6245 return 0;
6246 }
6247
6248 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6249 cp_rings, stats, vnics);
6250 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6251 if (rc)
6252 return rc;
6253
6254 return bnxt_hwrm_get_rings(bp);
6255 }
6256
6257 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6258 int cp, int stat, int vnic)
6259 {
6260 if (BNXT_PF(bp))
6261 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6262 vnic);
6263 else
6264 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6265 vnic);
6266 }
6267
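/* Number of NQs (P5) or completion rings (older chips) in use by the
 * L2 driver plus any MSI-X vectors assigned to the ULP (RDMA) driver.
 * If the ULP vectors sit above the L2 range, the highest ULP vector
 * determines the count.
 */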
6268 int bnxt_nq_rings_in_use(struct bnxt *bp)
6269 {
6270 int cp = bp->cp_nr_rings;
6271 int ulp_msix, ulp_base;
6272
6273 ulp_msix = bnxt_get_ulp_msix_num(bp);
6274 if (ulp_msix) {
6275 ulp_base = bnxt_get_ulp_msix_base(bp);
6276 cp += ulp_msix;
6277 if ((ulp_base + ulp_msix) > cp)
6278 cp = ulp_base + ulp_msix;
6279 }
6280 return cp;
6281 }
6282
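/* On P5 chips each RX and TX ring has its own completion ring, so the
 * number of completion rings in use is tx + rx.  On older chips this
 * is the same as the NQ/MSI-X count returned above.
 */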
6283 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6284 {
6285 int cp;
6286
6287 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6288 return bnxt_nq_rings_in_use(bp);
6289
6290 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6291 return cp;
6292 }
6293
6294 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6295 {
6296 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6297 int cp = bp->cp_nr_rings;
6298
6299 if (!ulp_stat)
6300 return cp;
6301
6302 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6303 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6304
6305 return cp + ulp_stat;
6306 }
6307
6308 /* Check if a default RSS map needs to be set up. This function is
6309 * only used on older firmware that does not require reserving RX rings.
6310 */
6311 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6312 {
6313 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6314
6315 /* The RSS map is valid for RX rings set to resv_rx_rings */
6316 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6317 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6318 if (!netif_is_rxfh_configured(bp->dev))
6319 bnxt_set_dflt_rss_indir_tbl(bp);
6320 }
6321 }
6322
6323 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6324 {
6325 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6326 int cp = bnxt_cp_rings_in_use(bp);
6327 int nq = bnxt_nq_rings_in_use(bp);
6328 int rx = bp->rx_nr_rings, stat;
6329 int vnic = 1, grp = rx;
6330
6331 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6332 bp->hwrm_spec_code >= 0x10601)
6333 return true;
6334
6335 /* Old firmware does not need RX ring reservations, but we still
6336 * need to set up a default RSS map when needed. With new firmware
6337 * we go through RX ring reservations first and then set up the
6338 * RSS map for the successfully reserved RX rings when needed.
6339 */
6340 if (!BNXT_NEW_RM(bp)) {
6341 bnxt_check_rss_tbl_no_rmgr(bp);
6342 return false;
6343 }
6344 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6345 vnic = rx + 1;
6346 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6347 rx <<= 1;
6348 stat = bnxt_get_func_stat_ctxs(bp);
6349 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6350 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6351 (hw_resc->resv_hw_ring_grps != grp &&
6352 !(bp->flags & BNXT_FLAG_CHIP_P5)))
6353 return true;
6354 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6355 hw_resc->resv_irqs != nq)
6356 return true;
6357 return false;
6358 }
6359
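/* Reserve firmware resources to match the current ring configuration.
 * With aggregation rings enabled, each RX ring consumes two hardware
 * RX rings (the normal ring plus its aggregation ring), hence the
 * rx <<= 1 / rx >>= 1 adjustments below.
 */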
6360 static int __bnxt_reserve_rings(struct bnxt *bp)
6361 {
6362 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6363 int cp = bnxt_nq_rings_in_use(bp);
6364 int tx = bp->tx_nr_rings;
6365 int rx = bp->rx_nr_rings;
6366 int grp, rx_rings, rc;
6367 int vnic = 1, stat;
6368 bool sh = false;
6369
6370 if (!bnxt_need_reserve_rings(bp))
6371 return 0;
6372
6373 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6374 sh = true;
6375 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6376 vnic = rx + 1;
6377 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6378 rx <<= 1;
6379 grp = bp->rx_nr_rings;
6380 stat = bnxt_get_func_stat_ctxs(bp);
6381
6382 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6383 if (rc)
6384 return rc;
6385
6386 tx = hw_resc->resv_tx_rings;
6387 if (BNXT_NEW_RM(bp)) {
6388 rx = hw_resc->resv_rx_rings;
6389 cp = hw_resc->resv_irqs;
6390 grp = hw_resc->resv_hw_ring_grps;
6391 vnic = hw_resc->resv_vnics;
6392 stat = hw_resc->resv_stat_ctxs;
6393 }
6394
6395 rx_rings = rx;
6396 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6397 if (rx >= 2) {
6398 rx_rings = rx >> 1;
6399 } else {
6400 if (netif_running(bp->dev))
6401 return -ENOMEM;
6402
6403 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6404 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6405 bp->dev->hw_features &= ~NETIF_F_LRO;
6406 bp->dev->features &= ~NETIF_F_LRO;
6407 bnxt_set_ring_params(bp);
6408 }
6409 }
6410 rx_rings = min_t(int, rx_rings, grp);
6411 cp = min_t(int, cp, bp->cp_nr_rings);
6412 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6413 stat -= bnxt_get_ulp_stat_ctxs(bp);
6414 cp = min_t(int, cp, stat);
6415 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6416 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6417 rx = rx_rings << 1;
6418 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6419 bp->tx_nr_rings = tx;
6420
6421 /* If we cannot reserve all the RX rings, reset the RSS map only
6422 * if absolutely necessary
6423 */
6424 if (rx_rings != bp->rx_nr_rings) {
6425 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6426 rx_rings, bp->rx_nr_rings);
6427 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6428 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6429 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6430 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6431 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6432 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6433 }
6434 }
6435 bp->rx_nr_rings = rx_rings;
6436 bp->cp_nr_rings = cp;
6437
6438 if (!tx || !rx || !cp || !grp || !vnic || !stat)
6439 return -ENOMEM;
6440
6441 if (!netif_is_rxfh_configured(bp->dev))
6442 bnxt_set_dflt_rss_indir_tbl(bp);
6443
6444 return rc;
6445 }
6446
6447 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6448 int ring_grps, int cp_rings, int stats,
6449 int vnics)
6450 {
6451 struct hwrm_func_vf_cfg_input req = {0};
6452 u32 flags;
6453
6454 if (!BNXT_NEW_RM(bp))
6455 return 0;
6456
6457 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6458 cp_rings, stats, vnics);
6459 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6460 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6461 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6462 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6463 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6464 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6465 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6466 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6467
6468 req.flags = cpu_to_le32(flags);
6469 return hwrm_send_message_silent(bp, &req, sizeof(req),
6470 HWRM_CMD_TIMEOUT);
6471 }
6472
6473 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6474 int ring_grps, int cp_rings, int stats,
6475 int vnics)
6476 {
6477 struct hwrm_func_cfg_input req = {0};
6478 u32 flags;
6479
6480 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6481 cp_rings, stats, vnics);
6482 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6483 if (BNXT_NEW_RM(bp)) {
6484 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6485 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6486 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6487 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6488 if (bp->flags & BNXT_FLAG_CHIP_P5)
6489 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6490 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6491 else
6492 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6493 }
6494
6495 req.flags = cpu_to_le32(flags);
6496 return hwrm_send_message_silent(bp, &req, sizeof(req),
6497 HWRM_CMD_TIMEOUT);
6498 }
6499
6500 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6501 int ring_grps, int cp_rings, int stats,
6502 int vnics)
6503 {
6504 if (bp->hwrm_spec_code < 0x10801)
6505 return 0;
6506
6507 if (BNXT_PF(bp))
6508 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6509 ring_grps, cp_rings, stats,
6510 vnics);
6511
6512 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6513 cp_rings, stats, vnics);
6514 }
6515
6516 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6517 {
6518 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6519 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6520 struct hwrm_ring_aggint_qcaps_input req = {0};
6521 int rc;
6522
6523 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6524 coal_cap->num_cmpl_dma_aggr_max = 63;
6525 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6526 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6527 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6528 coal_cap->int_lat_tmr_min_max = 65535;
6529 coal_cap->int_lat_tmr_max_max = 65535;
6530 coal_cap->num_cmpl_aggr_int_max = 65535;
6531 coal_cap->timer_units = 80;
6532
6533 if (bp->hwrm_spec_code < 0x10902)
6534 return;
6535
6536 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6537 mutex_lock(&bp->hwrm_cmd_lock);
6538 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6539 if (!rc) {
6540 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6541 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6542 coal_cap->num_cmpl_dma_aggr_max =
6543 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6544 coal_cap->num_cmpl_dma_aggr_during_int_max =
6545 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6546 coal_cap->cmpl_aggr_dma_tmr_max =
6547 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6548 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6549 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6550 coal_cap->int_lat_tmr_min_max =
6551 le16_to_cpu(resp->int_lat_tmr_min_max);
6552 coal_cap->int_lat_tmr_max_max =
6553 le16_to_cpu(resp->int_lat_tmr_max_max);
6554 coal_cap->num_cmpl_aggr_int_max =
6555 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6556 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6557 }
6558 mutex_unlock(&bp->hwrm_cmd_lock);
6559 }
6560
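/* Convert a coalescing time from microseconds to hardware timer units.
 * timer_units is the tick period in nanoseconds (80 unless the firmware
 * reports otherwise via RING_AGGINT_QCAPS), so for example 25 usec
 * works out to 25000 / 80 = 312 timer units.
 */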
6561 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6562 {
6563 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6564
6565 return usec * 1000 / coal_cap->timer_units;
6566 }
6567
6568 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6569 struct bnxt_coal *hw_coal,
6570 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6571 {
6572 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6573 u32 cmpl_params = coal_cap->cmpl_params;
6574 u16 val, tmr, max, flags = 0;
6575
6576 max = hw_coal->bufs_per_record * 128;
6577 if (hw_coal->budget)
6578 max = hw_coal->bufs_per_record * hw_coal->budget;
6579 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6580
6581 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6582 req->num_cmpl_aggr_int = cpu_to_le16(val);
6583
6584 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6585 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6586
6587 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6588 coal_cap->num_cmpl_dma_aggr_during_int_max);
6589 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6590
6591 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6592 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6593 req->int_lat_tmr_max = cpu_to_le16(tmr);
6594
6595 /* min timer set to 1/2 of interrupt timer */
6596 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6597 val = tmr / 2;
6598 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6599 req->int_lat_tmr_min = cpu_to_le16(val);
6600 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6601 }
6602
6603 /* buf timer set to 1/4 of interrupt timer */
6604 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6605 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6606
6607 if (cmpl_params &
6608 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6609 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6610 val = clamp_t(u16, tmr, 1,
6611 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6612 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6613 req->enables |=
6614 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6615 }
6616
6617 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6618 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6619 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6620 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6621 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6622 req->flags = cpu_to_le16(flags);
6623 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6624 }
6625
6626 /* Caller holds bp->hwrm_cmd_lock */
6627 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6628 struct bnxt_coal *hw_coal)
6629 {
6630 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6631 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6632 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6633 u32 nq_params = coal_cap->nq_params;
6634 u16 tmr;
6635
6636 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6637 return 0;
6638
6639 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6640 -1, -1);
6641 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6642 req.flags =
6643 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6644
6645 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6646 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6647 req.int_lat_tmr_min = cpu_to_le16(tmr);
6648 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6649 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6650 }
6651
6652 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6653 {
6654 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6655 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6656 struct bnxt_coal coal;
6657
6658 /* Tick values are in microseconds.
6659 * 1 coal_buf x bufs_per_record = 1 completion record.
6660 */
6661 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6662
6663 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6664 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6665
6666 if (!bnapi->rx_ring)
6667 return -ENODEV;
6668
6669 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6670 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6671
6672 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6673
6674 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6675
6676 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6677 HWRM_CMD_TIMEOUT);
6678 }
6679
6680 int bnxt_hwrm_set_coal(struct bnxt *bp)
6681 {
6682 int i, rc = 0;
6683 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6684 req_tx = {0}, *req;
6685
6686 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6687 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6688 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6689 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6690
6691 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6692 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6693
6694 mutex_lock(&bp->hwrm_cmd_lock);
6695 for (i = 0; i < bp->cp_nr_rings; i++) {
6696 struct bnxt_napi *bnapi = bp->bnapi[i];
6697 struct bnxt_coal *hw_coal;
6698 u16 ring_id;
6699
6700 req = &req_rx;
6701 if (!bnapi->rx_ring) {
6702 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6703 req = &req_tx;
6704 } else {
6705 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6706 }
6707 req->ring_id = cpu_to_le16(ring_id);
6708
6709 rc = _hwrm_send_message(bp, req, sizeof(*req),
6710 HWRM_CMD_TIMEOUT);
6711 if (rc)
6712 break;
6713
6714 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6715 continue;
6716
6717 if (bnapi->rx_ring && bnapi->tx_ring) {
6718 req = &req_tx;
6719 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6720 req->ring_id = cpu_to_le16(ring_id);
6721 rc = _hwrm_send_message(bp, req, sizeof(*req),
6722 HWRM_CMD_TIMEOUT);
6723 if (rc)
6724 break;
6725 }
6726 if (bnapi->rx_ring)
6727 hw_coal = &bp->rx_coal;
6728 else
6729 hw_coal = &bp->tx_coal;
6730 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6731 }
6732 mutex_unlock(&bp->hwrm_cmd_lock);
6733 return rc;
6734 }
6735
6736 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6737 {
6738 struct hwrm_stat_ctx_clr_stats_input req0 = {0};
6739 struct hwrm_stat_ctx_free_input req = {0};
6740 int i;
6741
6742 if (!bp->bnapi)
6743 return;
6744
6745 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6746 return;
6747
6748 bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
6749 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6750
6751 mutex_lock(&bp->hwrm_cmd_lock);
6752 for (i = 0; i < bp->cp_nr_rings; i++) {
6753 struct bnxt_napi *bnapi = bp->bnapi[i];
6754 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6755
6756 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6757 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6758 if (BNXT_FW_MAJ(bp) <= 20) {
6759 req0.stat_ctx_id = req.stat_ctx_id;
6760 _hwrm_send_message(bp, &req0, sizeof(req0),
6761 HWRM_CMD_TIMEOUT);
6762 }
6763 _hwrm_send_message(bp, &req, sizeof(req),
6764 HWRM_CMD_TIMEOUT);
6765
6766 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6767 }
6768 }
6769 mutex_unlock(&bp->hwrm_cmd_lock);
6770 }
6771
6772 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6773 {
6774 int rc = 0, i;
6775 struct hwrm_stat_ctx_alloc_input req = {0};
6776 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6777
6778 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6779 return 0;
6780
6781 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6782
6783 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6784 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6785
6786 mutex_lock(&bp->hwrm_cmd_lock);
6787 for (i = 0; i < bp->cp_nr_rings; i++) {
6788 struct bnxt_napi *bnapi = bp->bnapi[i];
6789 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6790
6791 req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6792
6793 rc = _hwrm_send_message(bp, &req, sizeof(req),
6794 HWRM_CMD_TIMEOUT);
6795 if (rc)
6796 break;
6797
6798 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6799
6800 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6801 }
6802 mutex_unlock(&bp->hwrm_cmd_lock);
6803 return rc;
6804 }
6805
6806 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6807 {
6808 struct hwrm_func_qcfg_input req = {0};
6809 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6810 u32 min_db_offset = 0;
6811 u16 flags;
6812 int rc;
6813
6814 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6815 req.fid = cpu_to_le16(0xffff);
6816 mutex_lock(&bp->hwrm_cmd_lock);
6817 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6818 if (rc)
6819 goto func_qcfg_exit;
6820
6821 #ifdef CONFIG_BNXT_SRIOV
6822 if (BNXT_VF(bp)) {
6823 struct bnxt_vf_info *vf = &bp->vf;
6824
6825 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6826 } else {
6827 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6828 }
6829 #endif
6830 flags = le16_to_cpu(resp->flags);
6831 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6832 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6833 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6834 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6835 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6836 }
6837 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6838 bp->flags |= BNXT_FLAG_MULTI_HOST;
6839 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6840 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6841
6842 switch (resp->port_partition_type) {
6843 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6844 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6845 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6846 bp->port_partition_type = resp->port_partition_type;
6847 break;
6848 }
6849 if (bp->hwrm_spec_code < 0x10707 ||
6850 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6851 bp->br_mode = BRIDGE_MODE_VEB;
6852 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6853 bp->br_mode = BRIDGE_MODE_VEPA;
6854 else
6855 bp->br_mode = BRIDGE_MODE_UNDEF;
6856
6857 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6858 if (!bp->max_mtu)
6859 bp->max_mtu = BNXT_MAX_MTU;
6860
6861 if (bp->db_size)
6862 goto func_qcfg_exit;
6863
6864 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6865 if (BNXT_PF(bp))
6866 min_db_offset = DB_PF_OFFSET_P5;
6867 else
6868 min_db_offset = DB_VF_OFFSET_P5;
6869 }
6870 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6871 1024);
6872 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6873 bp->db_size <= min_db_offset)
6874 bp->db_size = pci_resource_len(bp->pdev, 2);
6875
6876 func_qcfg_exit:
6877 mutex_unlock(&bp->hwrm_cmd_lock);
6878 return rc;
6879 }
6880
6881 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6882 struct hwrm_func_backing_store_qcaps_output *resp)
6883 {
6884 struct bnxt_mem_init *mem_init;
6885 u16 init_mask;
6886 u8 init_val;
6887 u8 *offset;
6888 int i;
6889
6890 init_val = resp->ctx_kind_initializer;
6891 init_mask = le16_to_cpu(resp->ctx_init_mask);
6892 offset = &resp->qp_init_offset;
6893 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6894 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
6895 mem_init->init_val = init_val;
6896 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6897 if (!init_mask)
6898 continue;
6899 if (i == BNXT_CTX_MEM_INIT_STAT)
6900 offset = &resp->stat_init_offset;
6901 if (init_mask & (1 << i))
6902 mem_init->offset = *offset * 4;
6903 else
6904 mem_init->init_val = 0;
6905 }
6906 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6907 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6908 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6909 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6910 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6911 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
6912 }
6913
6914 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6915 {
6916 struct hwrm_func_backing_store_qcaps_input req = {0};
6917 struct hwrm_func_backing_store_qcaps_output *resp =
6918 bp->hwrm_cmd_resp_addr;
6919 int rc;
6920
6921 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6922 return 0;
6923
6924 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6925 mutex_lock(&bp->hwrm_cmd_lock);
6926 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6927 if (!rc) {
6928 struct bnxt_ctx_pg_info *ctx_pg;
6929 struct bnxt_ctx_mem_info *ctx;
6930 int i, tqm_rings;
6931
6932 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6933 if (!ctx) {
6934 rc = -ENOMEM;
6935 goto ctx_err;
6936 }
6937 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6938 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6939 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6940 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6941 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6942 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6943 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6944 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6945 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6946 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6947 ctx->vnic_max_vnic_entries =
6948 le16_to_cpu(resp->vnic_max_vnic_entries);
6949 ctx->vnic_max_ring_table_entries =
6950 le16_to_cpu(resp->vnic_max_ring_table_entries);
6951 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6952 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6953 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6954 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6955 ctx->tqm_min_entries_per_ring =
6956 le32_to_cpu(resp->tqm_min_entries_per_ring);
6957 ctx->tqm_max_entries_per_ring =
6958 le32_to_cpu(resp->tqm_max_entries_per_ring);
6959 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6960 if (!ctx->tqm_entries_multiple)
6961 ctx->tqm_entries_multiple = 1;
6962 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6963 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6964 ctx->mrav_num_entries_units =
6965 le16_to_cpu(resp->mrav_num_entries_units);
6966 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6967 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6968
6969 bnxt_init_ctx_initializer(ctx, resp);
6970
6971 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6972 if (!ctx->tqm_fp_rings_count)
6973 ctx->tqm_fp_rings_count = bp->max_q;
6974 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6975 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
6976
6977 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
6978 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6979 if (!ctx_pg) {
6980 kfree(ctx);
6981 rc = -ENOMEM;
6982 goto ctx_err;
6983 }
6984 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6985 ctx->tqm_mem[i] = ctx_pg;
6986 bp->ctx = ctx;
6987 } else {
6988 rc = 0;
6989 }
6990 ctx_err:
6991 mutex_unlock(&bp->hwrm_cmd_lock);
6992 return rc;
6993 }
6994
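/* Encode the page size/level attribute and page directory pointer for
 * one backing store region: depth 0 points directly at the single data
 * page, depth 1 at a page table, and depth 2 at a table of page tables.
 */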
6995 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6996 __le64 *pg_dir)
6997 {
6998 if (!rmem->nr_pages)
6999 return;
7000
7001 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
7002 if (rmem->depth >= 1) {
7003 if (rmem->depth == 2)
7004 *pg_attr |= 2;
7005 else
7006 *pg_attr |= 1;
7007 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
7008 } else {
7009 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
7010 }
7011 }
7012
7013 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
7014 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
7015 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
7016 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
7017 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
7018 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7019
7020 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7021 {
7022 struct hwrm_func_backing_store_cfg_input req = {0};
7023 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7024 struct bnxt_ctx_pg_info *ctx_pg;
7025 u32 req_len = sizeof(req);
7026 __le32 *num_entries;
7027 __le64 *pg_dir;
7028 u32 flags = 0;
7029 u8 *pg_attr;
7030 u32 ena;
7031 int i;
7032
7033 if (!ctx)
7034 return 0;
7035
7036 if (req_len > bp->hwrm_max_ext_req_len)
7037 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
7038 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
7039 req.enables = cpu_to_le32(enables);
7040
7041 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7042 ctx_pg = &ctx->qp_mem;
7043 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
7044 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7045 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7046 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7047 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7048 &req.qpc_pg_size_qpc_lvl,
7049 &req.qpc_page_dir);
7050 }
7051 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7052 ctx_pg = &ctx->srq_mem;
7053 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
7054 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7055 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7056 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7057 &req.srq_pg_size_srq_lvl,
7058 &req.srq_page_dir);
7059 }
7060 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7061 ctx_pg = &ctx->cq_mem;
7062 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
7063 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7064 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7065 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
7066 &req.cq_page_dir);
7067 }
7068 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7069 ctx_pg = &ctx->vnic_mem;
7070 req.vnic_num_vnic_entries =
7071 cpu_to_le16(ctx->vnic_max_vnic_entries);
7072 req.vnic_num_ring_table_entries =
7073 cpu_to_le16(ctx->vnic_max_ring_table_entries);
7074 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7075 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7076 &req.vnic_pg_size_vnic_lvl,
7077 &req.vnic_page_dir);
7078 }
7079 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7080 ctx_pg = &ctx->stat_mem;
7081 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7082 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7083 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7084 &req.stat_pg_size_stat_lvl,
7085 &req.stat_page_dir);
7086 }
7087 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7088 ctx_pg = &ctx->mrav_mem;
7089 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7090 if (ctx->mrav_num_entries_units)
7091 flags |=
7092 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7093 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7094 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7095 &req.mrav_pg_size_mrav_lvl,
7096 &req.mrav_page_dir);
7097 }
7098 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7099 ctx_pg = &ctx->tim_mem;
7100 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
7101 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7102 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7103 &req.tim_pg_size_tim_lvl,
7104 &req.tim_page_dir);
7105 }
7106 for (i = 0, num_entries = &req.tqm_sp_num_entries,
7107 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
7108 pg_dir = &req.tqm_sp_page_dir,
7109 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7110 i < BNXT_MAX_TQM_RINGS;
7111 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7112 if (!(enables & ena))
7113 continue;
7114
7115 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7116 ctx_pg = ctx->tqm_mem[i];
7117 *num_entries = cpu_to_le32(ctx_pg->entries);
7118 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7119 }
7120 req.flags = cpu_to_le32(flags);
7121 return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
7122 }
7123
7124 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7125 struct bnxt_ctx_pg_info *ctx_pg)
7126 {
7127 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7128
7129 rmem->page_size = BNXT_PAGE_SIZE;
7130 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7131 rmem->dma_arr = ctx_pg->ctx_dma_arr;
7132 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7133 if (rmem->depth >= 1)
7134 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7135 return bnxt_alloc_ring(bp, rmem);
7136 }
7137
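/* Allocate the pages backing one context memory region.  Regions that
 * fit within MAX_CTX_PAGES use a single level of indirection; larger
 * regions (or callers asking for depth > 1) get a two-level layout in
 * which each top-level entry covers up to MAX_CTX_PAGES data pages.
 */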
7138 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7139 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7140 u8 depth, struct bnxt_mem_init *mem_init)
7141 {
7142 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7143 int rc;
7144
7145 if (!mem_size)
7146 return -EINVAL;
7147
7148 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7149 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7150 ctx_pg->nr_pages = 0;
7151 return -EINVAL;
7152 }
7153 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7154 int nr_tbls, i;
7155
7156 rmem->depth = 2;
7157 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7158 GFP_KERNEL);
7159 if (!ctx_pg->ctx_pg_tbl)
7160 return -ENOMEM;
7161 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7162 rmem->nr_pages = nr_tbls;
7163 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7164 if (rc)
7165 return rc;
7166 for (i = 0; i < nr_tbls; i++) {
7167 struct bnxt_ctx_pg_info *pg_tbl;
7168
7169 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7170 if (!pg_tbl)
7171 return -ENOMEM;
7172 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7173 rmem = &pg_tbl->ring_mem;
7174 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7175 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7176 rmem->depth = 1;
7177 rmem->nr_pages = MAX_CTX_PAGES;
7178 rmem->mem_init = mem_init;
7179 if (i == (nr_tbls - 1)) {
7180 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7181
7182 if (rem)
7183 rmem->nr_pages = rem;
7184 }
7185 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7186 if (rc)
7187 break;
7188 }
7189 } else {
7190 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7191 if (rmem->nr_pages > 1 || depth)
7192 rmem->depth = 1;
7193 rmem->mem_init = mem_init;
7194 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7195 }
7196 return rc;
7197 }
7198
7199 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7200 struct bnxt_ctx_pg_info *ctx_pg)
7201 {
7202 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7203
7204 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7205 ctx_pg->ctx_pg_tbl) {
7206 int i, nr_tbls = rmem->nr_pages;
7207
7208 for (i = 0; i < nr_tbls; i++) {
7209 struct bnxt_ctx_pg_info *pg_tbl;
7210 struct bnxt_ring_mem_info *rmem2;
7211
7212 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7213 if (!pg_tbl)
7214 continue;
7215 rmem2 = &pg_tbl->ring_mem;
7216 bnxt_free_ring(bp, rmem2);
7217 ctx_pg->ctx_pg_arr[i] = NULL;
7218 kfree(pg_tbl);
7219 ctx_pg->ctx_pg_tbl[i] = NULL;
7220 }
7221 kfree(ctx_pg->ctx_pg_tbl);
7222 ctx_pg->ctx_pg_tbl = NULL;
7223 }
7224 bnxt_free_ring(bp, rmem);
7225 ctx_pg->nr_pages = 0;
7226 }
7227
7228 static void bnxt_free_ctx_mem(struct bnxt *bp)
7229 {
7230 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7231 int i;
7232
7233 if (!ctx)
7234 return;
7235
7236 if (ctx->tqm_mem[0]) {
7237 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7238 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7239 kfree(ctx->tqm_mem[0]);
7240 ctx->tqm_mem[0] = NULL;
7241 }
7242
7243 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7244 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7245 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7246 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7247 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7248 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7249 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7250 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7251 }
7252
7253 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7254 {
7255 struct bnxt_ctx_pg_info *ctx_pg;
7256 struct bnxt_ctx_mem_info *ctx;
7257 struct bnxt_mem_init *init;
7258 u32 mem_size, ena, entries;
7259 u32 entries_sp, min;
7260 u32 num_mr, num_ah;
7261 u32 extra_srqs = 0;
7262 u32 extra_qps = 0;
7263 u8 pg_lvl = 1;
7264 int i, rc;
7265
7266 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7267 if (rc) {
7268 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7269 rc);
7270 return rc;
7271 }
7272 ctx = bp->ctx;
7273 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7274 return 0;
7275
7276 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7277 pg_lvl = 2;
7278 extra_qps = 65536;
7279 extra_srqs = 8192;
7280 }
7281
7282 ctx_pg = &ctx->qp_mem;
7283 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7284 extra_qps;
7285 if (ctx->qp_entry_size) {
7286 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7287 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7288 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7289 if (rc)
7290 return rc;
7291 }
7292
7293 ctx_pg = &ctx->srq_mem;
7294 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7295 if (ctx->srq_entry_size) {
7296 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7297 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7298 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7299 if (rc)
7300 return rc;
7301 }
7302
7303 ctx_pg = &ctx->cq_mem;
7304 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7305 if (ctx->cq_entry_size) {
7306 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7307 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7308 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7309 if (rc)
7310 return rc;
7311 }
7312
7313 ctx_pg = &ctx->vnic_mem;
7314 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7315 ctx->vnic_max_ring_table_entries;
7316 if (ctx->vnic_entry_size) {
7317 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7318 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7319 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7320 if (rc)
7321 return rc;
7322 }
7323
7324 ctx_pg = &ctx->stat_mem;
7325 ctx_pg->entries = ctx->stat_max_entries;
7326 if (ctx->stat_entry_size) {
7327 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7328 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7329 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7330 if (rc)
7331 return rc;
7332 }
7333
7334 ena = 0;
7335 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7336 goto skip_rdma;
7337
7338 ctx_pg = &ctx->mrav_mem;
7339 /* 128K extra is needed to accommodate static AH context
7340 * allocation by f/w.
7341 */
7342 num_mr = 1024 * 256;
7343 num_ah = 1024 * 128;
7344 ctx_pg->entries = num_mr + num_ah;
7345 if (ctx->mrav_entry_size) {
7346 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7347 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7348 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7349 if (rc)
7350 return rc;
7351 }
7352 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
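/* When the firmware reports mrav_num_entries_units, the entry count
 * is passed as a split value: MR entries (in those units) in the
 * upper 16 bits and AH entries in the lower 16 bits.
 */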
7353 if (ctx->mrav_num_entries_units)
7354 ctx_pg->entries =
7355 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7356 (num_ah / ctx->mrav_num_entries_units);
7357
7358 ctx_pg = &ctx->tim_mem;
7359 ctx_pg->entries = ctx->qp_mem.entries;
7360 if (ctx->tim_entry_size) {
7361 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7362 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7363 if (rc)
7364 return rc;
7365 }
7366 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7367
7368 skip_rdma:
7369 min = ctx->tqm_min_entries_per_ring;
7370 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7371 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7372 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7373 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7374 entries = roundup(entries, ctx->tqm_entries_multiple);
7375 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7376 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7377 ctx_pg = ctx->tqm_mem[i];
7378 ctx_pg->entries = i ? entries : entries_sp;
7379 if (ctx->tqm_entry_size) {
7380 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7381 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7382 NULL);
7383 if (rc)
7384 return rc;
7385 }
7386 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7387 }
7388 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7389 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7390 if (rc) {
7391 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7392 rc);
7393 return rc;
7394 }
7395 ctx->flags |= BNXT_CTX_FLAG_INITED;
7396 return 0;
7397 }
7398
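/* Query FUNC_RESOURCE_QCAPS and cache the firmware-advertised resource
 * limits in bp->hw_resc.  When @all is false only max_tx_sch_inputs is
 * refreshed; otherwise the min/max ring, VNIC, L2 context and stat
 * context limits (and, on the PF, the VF reservation strategy) are
 * updated as well.
 */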
7399 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7400 {
7401 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7402 struct hwrm_func_resource_qcaps_input req = {0};
7403 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7404 int rc;
7405
7406 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
7407 req.fid = cpu_to_le16(0xffff);
7408
7409 mutex_lock(&bp->hwrm_cmd_lock);
7410 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
7411 HWRM_CMD_TIMEOUT);
7412 if (rc)
7413 goto hwrm_func_resc_qcaps_exit;
7414
7415 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7416 if (!all)
7417 goto hwrm_func_resc_qcaps_exit;
7418
7419 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7420 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7421 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7422 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7423 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7424 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7425 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7426 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7427 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7428 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7429 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7430 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7431 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7432 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7433 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7434 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7435
7436 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7437 u16 max_msix = le16_to_cpu(resp->max_msix);
7438
7439 hw_resc->max_nqs = max_msix;
7440 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7441 }
7442
7443 if (BNXT_PF(bp)) {
7444 struct bnxt_pf_info *pf = &bp->pf;
7445
7446 pf->vf_resv_strategy =
7447 le16_to_cpu(resp->vf_reservation_strategy);
7448 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7449 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7450 }
7451 hwrm_func_resc_qcaps_exit:
7452 mutex_unlock(&bp->hwrm_cmd_lock);
7453 return rc;
7454 }
7455
7456 /* bp->hwrm_cmd_lock already held. */
7457 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7458 {
7459 struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7460 struct hwrm_port_mac_ptp_qcfg_input req = {0};
7461 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7462 u8 flags;
7463 int rc;
7464
7465 if (bp->hwrm_spec_code < 0x10801) {
7466 rc = -ENODEV;
7467 goto no_ptp;
7468 }
7469
7470 req.port_id = cpu_to_le16(bp->pf.port_id);
7471 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1);
7472 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7473 if (rc)
7474 goto no_ptp;
7475
7476 flags = resp->flags;
7477 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7478 rc = -ENODEV;
7479 goto no_ptp;
7480 }
7481 if (!ptp) {
7482 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7483 if (!ptp)
7484 return -ENOMEM;
7485 ptp->bp = bp;
7486 bp->ptp_cfg = ptp;
7487 }
7488 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7489 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7490 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7491 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7492 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7493 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7494 } else {
7495 rc = -ENODEV;
7496 goto no_ptp;
7497 }
7498 return 0;
7499
7500 no_ptp:
7501 kfree(ptp);
7502 bp->ptp_cfg = NULL;
7503 return rc;
7504 }
7505
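/* Issue HWRM_FUNC_QCAPS and translate the returned capability flags into
 * bp->flags / bp->fw_cap, record the advertised maximum resources in
 * bp->hw_resc, and capture PF (or VF) identity such as fid, port_id and
 * the permanent MAC address.  PTP support is probed here for the PF.
 */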
7506 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7507 {
7508 int rc = 0;
7509 struct hwrm_func_qcaps_input req = {0};
7510 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7511 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7512 u32 flags, flags_ext;
7513
7514 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
7515 req.fid = cpu_to_le16(0xffff);
7516
7517 mutex_lock(&bp->hwrm_cmd_lock);
7518 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7519 if (rc)
7520 goto hwrm_func_qcaps_exit;
7521
7522 flags = le32_to_cpu(resp->flags);
7523 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7524 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7525 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7526 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7527 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7528 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7529 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7530 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7531 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7532 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7533 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7534 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7535 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7536 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7537 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7538 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7539
7540 flags_ext = le32_to_cpu(resp->flags_ext);
7541 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7542 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7543
7544 bp->tx_push_thresh = 0;
7545 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7546 BNXT_FW_MAJ(bp) > 217)
7547 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7548
7549 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7550 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7551 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7552 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7553 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7554 if (!hw_resc->max_hw_ring_grps)
7555 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7556 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7557 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7558 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7559
7560 if (BNXT_PF(bp)) {
7561 struct bnxt_pf_info *pf = &bp->pf;
7562
7563 pf->fw_fid = le16_to_cpu(resp->fid);
7564 pf->port_id = le16_to_cpu(resp->port_id);
7565 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7566 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7567 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7568 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7569 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7570 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7571 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7572 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7573 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7574 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7575 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7576 bp->flags |= BNXT_FLAG_WOL_CAP;
7577 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7578 __bnxt_hwrm_ptp_qcfg(bp);
7579 } else {
7580 kfree(bp->ptp_cfg);
7581 bp->ptp_cfg = NULL;
7582 }
7583 } else {
7584 #ifdef CONFIG_BNXT_SRIOV
7585 struct bnxt_vf_info *vf = &bp->vf;
7586
7587 vf->fw_fid = le16_to_cpu(resp->fid);
7588 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7589 #endif
7590 }
7591
7592 hwrm_func_qcaps_exit:
7593 mutex_unlock(&bp->hwrm_cmd_lock);
7594 return rc;
7595 }
7596
7597 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7598
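/* Full capability probe: query function caps and the queue/port
 * configuration, then on firmware spec 1.8.3+ allocate backing store
 * context memory and query the resource capabilities, marking the new
 * resource manager (BNXT_FW_CAP_NEW_RM) as available on success.
 */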
7599 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7600 {
7601 int rc;
7602
7603 rc = __bnxt_hwrm_func_qcaps(bp);
7604 if (rc)
7605 return rc;
7606 rc = bnxt_hwrm_queue_qportcfg(bp);
7607 if (rc) {
7608 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7609 return rc;
7610 }
7611 if (bp->hwrm_spec_code >= 0x10803) {
7612 rc = bnxt_alloc_ctx_mem(bp);
7613 if (rc)
7614 return rc;
7615 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7616 if (!rc)
7617 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7618 }
7619 return 0;
7620 }
7621
7622 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7623 {
7624 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7625 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7626 int rc = 0;
7627 u32 flags;
7628
7629 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7630 return 0;
7631
7632 resp = bp->hwrm_cmd_resp_addr;
7633 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7634
7635 mutex_lock(&bp->hwrm_cmd_lock);
7636 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7637 if (rc)
7638 goto hwrm_cfa_adv_qcaps_exit;
7639
7640 flags = le32_to_cpu(resp->flags);
7641 if (flags &
7642 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7643 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7644
7645 hwrm_cfa_adv_qcaps_exit:
7646 mutex_unlock(&bp->hwrm_cmd_lock);
7647 return rc;
7648 }
7649
7650 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7651 {
7652 if (bp->fw_health)
7653 return 0;
7654
7655 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7656 if (!bp->fw_health)
7657 return -ENOMEM;
7658
7659 return 0;
7660 }
7661
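/* Allocate bp->fw_health only when hot reset or error recovery is
 * supported; if the allocation fails, clear those capability bits so the
 * rest of the driver does not depend on the structure.
 */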
7662 static int bnxt_alloc_fw_health(struct bnxt *bp)
7663 {
7664 int rc;
7665
7666 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7667 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7668 return 0;
7669
7670 rc = __bnxt_alloc_fw_health(bp);
7671 if (rc) {
7672 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7673 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7674 return rc;
7675 }
7676
7677 return 0;
7678 }
7679
7680 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7681 {
7682 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7683 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7684 BNXT_FW_HEALTH_WIN_MAP_OFF);
7685 }
7686
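/* Return false only when the health status register is readable and
 * reports an unhealthy firmware state; otherwise assume firmware is OK.
 */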
7687 bool bnxt_is_fw_healthy(struct bnxt *bp)
7688 {
7689 if (bp->fw_health && bp->fw_health->status_reliable) {
7690 u32 fw_status;
7691
7692 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7693 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7694 return false;
7695 }
7696
7697 return true;
7698 }
7699
7700 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7701 {
7702 struct bnxt_fw_health *fw_health = bp->fw_health;
7703 u32 reg_type;
7704
7705 if (!fw_health || !fw_health->status_reliable)
7706 return;
7707
7708 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7709 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7710 fw_health->status_reliable = false;
7711 }
7712
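/* Locate the firmware health status register, either through the HCOMM
 * status structure or, on P5 chips without a valid HCOMM signature, at
 * the fixed GRC location, then map it and mark the status as reliable.
 */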
7713 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7714 {
7715 void __iomem *hs;
7716 u32 status_loc;
7717 u32 reg_type;
7718 u32 sig;
7719
7720 if (bp->fw_health)
7721 bp->fw_health->status_reliable = false;
7722
7723 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7724 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7725
7726 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7727 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7728 if (!bp->chip_num) {
7729 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7730 bp->chip_num = readl(bp->bar0 +
7731 BNXT_FW_HEALTH_WIN_BASE +
7732 BNXT_GRC_REG_CHIP_NUM);
7733 }
7734 if (!BNXT_CHIP_P5(bp))
7735 return;
7736
7737 status_loc = BNXT_GRC_REG_STATUS_P5 |
7738 BNXT_FW_HEALTH_REG_TYPE_BAR0;
7739 } else {
7740 status_loc = readl(hs + offsetof(struct hcomm_status,
7741 fw_status_loc));
7742 }
7743
7744 if (__bnxt_alloc_fw_health(bp)) {
7745 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7746 return;
7747 }
7748
7749 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7750 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7751 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7752 __bnxt_map_fw_health_reg(bp, status_loc);
7753 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7754 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7755 }
7756
7757 bp->fw_health->status_reliable = true;
7758 }
7759
7760 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7761 {
7762 struct bnxt_fw_health *fw_health = bp->fw_health;
7763 u32 reg_base = 0xffffffff;
7764 int i;
7765
7766 bp->fw_health->status_reliable = false;
7767 /* Only pre-map the monitoring GRC registers using window 3 */
7768 for (i = 0; i < 4; i++) {
7769 u32 reg = fw_health->regs[i];
7770
7771 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7772 continue;
7773 if (reg_base == 0xffffffff)
7774 reg_base = reg & BNXT_GRC_BASE_MASK;
7775 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7776 return -ERANGE;
7777 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7778 }
7779 bp->fw_health->status_reliable = true;
7780 if (reg_base == 0xffffffff)
7781 return 0;
7782
7783 __bnxt_map_fw_health_reg(bp, reg_base);
7784 return 0;
7785 }
7786
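/* Retrieve the firmware error recovery configuration: polling intervals,
 * wait times, the health/heartbeat/reset-count register locations and
 * the reset register sequence, then map the GRC monitoring registers.
 * The error recovery capability is dropped if any step fails.
 */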
7787 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7788 {
7789 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7790 struct bnxt_fw_health *fw_health = bp->fw_health;
7791 struct hwrm_error_recovery_qcfg_input req = {0};
7792 int rc, i;
7793
7794 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7795 return 0;
7796
7797 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7798 mutex_lock(&bp->hwrm_cmd_lock);
7799 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7800 if (rc)
7801 goto err_recovery_out;
7802 fw_health->flags = le32_to_cpu(resp->flags);
7803 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7804 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7805 rc = -EINVAL;
7806 goto err_recovery_out;
7807 }
7808 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7809 fw_health->master_func_wait_dsecs =
7810 le32_to_cpu(resp->master_func_wait_period);
7811 fw_health->normal_func_wait_dsecs =
7812 le32_to_cpu(resp->normal_func_wait_period);
7813 fw_health->post_reset_wait_dsecs =
7814 le32_to_cpu(resp->master_func_wait_period_after_reset);
7815 fw_health->post_reset_max_wait_dsecs =
7816 le32_to_cpu(resp->max_bailout_time_after_reset);
7817 fw_health->regs[BNXT_FW_HEALTH_REG] =
7818 le32_to_cpu(resp->fw_health_status_reg);
7819 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7820 le32_to_cpu(resp->fw_heartbeat_reg);
7821 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7822 le32_to_cpu(resp->fw_reset_cnt_reg);
7823 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7824 le32_to_cpu(resp->reset_inprogress_reg);
7825 fw_health->fw_reset_inprog_reg_mask =
7826 le32_to_cpu(resp->reset_inprogress_reg_mask);
7827 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7828 if (fw_health->fw_reset_seq_cnt >= 16) {
7829 rc = -EINVAL;
7830 goto err_recovery_out;
7831 }
7832 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7833 fw_health->fw_reset_seq_regs[i] =
7834 le32_to_cpu(resp->reset_reg[i]);
7835 fw_health->fw_reset_seq_vals[i] =
7836 le32_to_cpu(resp->reset_reg_val[i]);
7837 fw_health->fw_reset_seq_delay_msec[i] =
7838 resp->delay_after_reset[i];
7839 }
7840 err_recovery_out:
7841 mutex_unlock(&bp->hwrm_cmd_lock);
7842 if (!rc)
7843 rc = bnxt_map_fw_health_regs(bp);
7844 if (rc)
7845 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7846 return rc;
7847 }
7848
7849 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7850 {
7851 struct hwrm_func_reset_input req = {0};
7852
7853 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7854 req.enables = 0;
7855
7856 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7857 }
7858
7859 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7860 {
7861 struct hwrm_nvm_get_dev_info_output nvm_info;
7862
7863 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7864 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7865 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7866 nvm_info.nvm_cfg_ver_upd);
7867 }
7868
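/* Query the port's hardware queue configuration and build the
 * bp->q_info[] / bp->q_ids[] tables.  A CNP queue only counts as a
 * usable TC when it does not need to be reserved for RoCE; max_tc and
 * max_lltc are clamped to the supported limits.
 */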
7869 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7870 {
7871 int rc = 0;
7872 struct hwrm_queue_qportcfg_input req = {0};
7873 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7874 u8 i, j, *qptr;
7875 bool no_rdma;
7876
7877 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7878
7879 mutex_lock(&bp->hwrm_cmd_lock);
7880 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7881 if (rc)
7882 goto qportcfg_exit;
7883
7884 if (!resp->max_configurable_queues) {
7885 rc = -EINVAL;
7886 goto qportcfg_exit;
7887 }
7888 bp->max_tc = resp->max_configurable_queues;
7889 bp->max_lltc = resp->max_configurable_lossless_queues;
7890 if (bp->max_tc > BNXT_MAX_QUEUE)
7891 bp->max_tc = BNXT_MAX_QUEUE;
7892
7893 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7894 qptr = &resp->queue_id0;
7895 for (i = 0, j = 0; i < bp->max_tc; i++) {
7896 bp->q_info[j].queue_id = *qptr;
7897 bp->q_ids[i] = *qptr++;
7898 bp->q_info[j].queue_profile = *qptr++;
7899 bp->tc_to_qidx[j] = j;
7900 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7901 (no_rdma && BNXT_PF(bp)))
7902 j++;
7903 }
7904 bp->max_q = bp->max_tc;
7905 bp->max_tc = max_t(u8, j, 1);
7906
7907 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7908 bp->max_tc = 1;
7909
7910 if (bp->max_lltc > bp->max_tc)
7911 bp->max_lltc = bp->max_tc;
7912
7913 qportcfg_exit:
7914 mutex_unlock(&bp->hwrm_cmd_lock);
7915 return rc;
7916 }
7917
7918 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7919 {
7920 struct hwrm_ver_get_input req = {0};
7921 int rc;
7922
7923 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7924 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7925 req.hwrm_intf_min = HWRM_VERSION_MINOR;
7926 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7927
7928 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7929 silent);
7930 return rc;
7931 }
7932
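/* Query HWRM/firmware version information: record the interface spec
 * code, build the firmware version strings, pick up the default command
 * timeout and maximum request lengths, the chip number/revision, and the
 * dev_caps_cfg capability bits (short commands, Kong channel, etc.).
 */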
7933 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7934 {
7935 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7936 u16 fw_maj, fw_min, fw_bld, fw_rsv;
7937 u32 dev_caps_cfg, hwrm_ver;
7938 int rc, len;
7939
7940 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7941 mutex_lock(&bp->hwrm_cmd_lock);
7942 rc = __bnxt_hwrm_ver_get(bp, false);
7943 if (rc)
7944 goto hwrm_ver_get_exit;
7945
7946 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7947
7948 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7949 resp->hwrm_intf_min_8b << 8 |
7950 resp->hwrm_intf_upd_8b;
7951 if (resp->hwrm_intf_maj_8b < 1) {
7952 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7953 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7954 resp->hwrm_intf_upd_8b);
7955 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7956 }
7957
7958 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7959 HWRM_VERSION_UPDATE;
7960
7961 if (bp->hwrm_spec_code > hwrm_ver)
7962 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7963 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7964 HWRM_VERSION_UPDATE);
7965 else
7966 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7967 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7968 resp->hwrm_intf_upd_8b);
7969
7970 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7971 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7972 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7973 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7974 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7975 len = FW_VER_STR_LEN;
7976 } else {
7977 fw_maj = resp->hwrm_fw_maj_8b;
7978 fw_min = resp->hwrm_fw_min_8b;
7979 fw_bld = resp->hwrm_fw_bld_8b;
7980 fw_rsv = resp->hwrm_fw_rsvd_8b;
7981 len = BC_HWRM_STR_LEN;
7982 }
7983 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7984 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7985 fw_rsv);
7986
7987 if (strlen(resp->active_pkg_name)) {
7988 int fw_ver_len = strlen(bp->fw_ver_str);
7989
7990 snprintf(bp->fw_ver_str + fw_ver_len,
7991 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7992 resp->active_pkg_name);
7993 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7994 }
7995
7996 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7997 if (!bp->hwrm_cmd_timeout)
7998 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7999
8000 if (resp->hwrm_intf_maj_8b >= 1) {
8001 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
8002 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8003 }
8004 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8005 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
8006
8007 bp->chip_num = le16_to_cpu(resp->chip_num);
8008 bp->chip_rev = resp->chip_rev;
8009 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8010 !resp->chip_metal)
8011 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8012
8013 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8014 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8015 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8016 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8017
8018 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8019 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8020
8021 if (dev_caps_cfg &
8022 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8023 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8024
8025 if (dev_caps_cfg &
8026 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8027 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8028
8029 if (dev_caps_cfg &
8030 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8031 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8032
8033 hwrm_ver_get_exit:
8034 mutex_unlock(&bp->hwrm_cmd_lock);
8035 return rc;
8036 }
8037
8038 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8039 {
8040 struct hwrm_fw_set_time_input req = {0};
8041 struct tm tm;
8042 time64_t now = ktime_get_real_seconds();
8043
8044 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8045 bp->hwrm_spec_code < 0x10400)
8046 return -EOPNOTSUPP;
8047
8048 time64_to_tm(now, 0, &tm);
8049 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
8050 req.year = cpu_to_le16(1900 + tm.tm_year);
8051 req.month = 1 + tm.tm_mon;
8052 req.day = tm.tm_mday;
8053 req.hour = tm.tm_hour;
8054 req.minute = tm.tm_min;
8055 req.second = tm.tm_sec;
8056 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8057 }
8058
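/* Fold a hardware counter of limited width (given by @mask) into the
 * wider 64-bit software counter, accounting for a single wrap since the
 * last accumulation.
 */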
8059 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8060 {
8061 u64 sw_tmp;
8062
8063 hw &= mask;
8064 sw_tmp = (*sw & ~mask) | hw;
8065 if (hw < (*sw & mask))
8066 sw_tmp += mask + 1;
8067 WRITE_ONCE(*sw, sw_tmp);
8068 }
8069
8070 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8071 int count, bool ignore_zero)
8072 {
8073 int i;
8074
8075 for (i = 0; i < count; i++) {
8076 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8077
8078 if (ignore_zero && !hw)
8079 continue;
8080
8081 if (masks[i] == -1ULL)
8082 sw_stats[i] = hw;
8083 else
8084 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8085 }
8086 }
8087
8088 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8089 {
8090 if (!stats->hw_stats)
8091 return;
8092
8093 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8094 stats->hw_masks, stats->len / 8, false);
8095 }
8096
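/* Accumulate the per-ring and port statistics from the DMA buffers into
 * the 64-bit software counters.  On P5 chips, zero hardware samples are
 * skipped to work around a chip counter glitch.
 */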
8097 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8098 {
8099 struct bnxt_stats_mem *ring0_stats;
8100 bool ignore_zero = false;
8101 int i;
8102
8103 /* Chip bug. Counter intermittently becomes 0. */
8104 if (bp->flags & BNXT_FLAG_CHIP_P5)
8105 ignore_zero = true;
8106
8107 for (i = 0; i < bp->cp_nr_rings; i++) {
8108 struct bnxt_napi *bnapi = bp->bnapi[i];
8109 struct bnxt_cp_ring_info *cpr;
8110 struct bnxt_stats_mem *stats;
8111
8112 cpr = &bnapi->cp_ring;
8113 stats = &cpr->stats;
8114 if (!i)
8115 ring0_stats = stats;
8116 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8117 ring0_stats->hw_masks,
8118 ring0_stats->len / 8, ignore_zero);
8119 }
8120 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8121 struct bnxt_stats_mem *stats = &bp->port_stats;
8122 __le64 *hw_stats = stats->hw_stats;
8123 u64 *sw_stats = stats->sw_stats;
8124 u64 *masks = stats->hw_masks;
8125 int cnt;
8126
8127 cnt = sizeof(struct rx_port_stats) / 8;
8128 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8129
8130 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8131 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8132 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8133 cnt = sizeof(struct tx_port_stats) / 8;
8134 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8135 }
8136 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8137 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8138 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8139 }
8140 }
8141
8142 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8143 {
8144 struct bnxt_pf_info *pf = &bp->pf;
8145 struct hwrm_port_qstats_input req = {0};
8146
8147 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8148 return 0;
8149
8150 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8151 return -EOPNOTSUPP;
8152
8153 req.flags = flags;
8154 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
8155 req.port_id = cpu_to_le16(pf->port_id);
8156 req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8157 BNXT_TX_PORT_STATS_BYTE_OFFSET);
8158 req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8159 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8160 }
8161
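/* Request extended port statistics from firmware and, when called
 * without special flags, also query the priority-to-CoS mapping used to
 * index the per-priority counters.
 */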
8162 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8163 {
8164 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
8165 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
8166 struct hwrm_port_qstats_ext_input req = {0};
8167 struct bnxt_pf_info *pf = &bp->pf;
8168 u32 tx_stat_size;
8169 int rc;
8170
8171 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8172 return 0;
8173
8174 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8175 return -EOPNOTSUPP;
8176
8177 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
8178 req.flags = flags;
8179 req.port_id = cpu_to_le16(pf->port_id);
8180 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8181 req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8182 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8183 sizeof(struct tx_port_stats_ext) : 0;
8184 req.tx_stat_size = cpu_to_le16(tx_stat_size);
8185 req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8186 mutex_lock(&bp->hwrm_cmd_lock);
8187 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8188 if (!rc) {
8189 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
8190 bp->fw_tx_stats_ext_size = tx_stat_size ?
8191 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
8192 } else {
8193 bp->fw_rx_stats_ext_size = 0;
8194 bp->fw_tx_stats_ext_size = 0;
8195 }
8196 if (flags)
8197 goto qstats_done;
8198
8199 if (bp->fw_tx_stats_ext_size <=
8200 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8201 mutex_unlock(&bp->hwrm_cmd_lock);
8202 bp->pri2cos_valid = 0;
8203 return rc;
8204 }
8205
8206 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
8207 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8208
8209 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
8210 if (!rc) {
8211 struct hwrm_queue_pri2cos_qcfg_output *resp2;
8212 u8 *pri2cos;
8213 int i, j;
8214
8215 resp2 = bp->hwrm_cmd_resp_addr;
8216 pri2cos = &resp2->pri0_cos_queue_id;
8217 for (i = 0; i < 8; i++) {
8218 u8 queue_id = pri2cos[i];
8219 u8 queue_idx;
8220
8221 /* Per-port queue IDs start from 0, 10, 20, etc. */
8222 queue_idx = queue_id % 10;
8223 if (queue_idx > BNXT_MAX_QUEUE) {
8224 bp->pri2cos_valid = false;
8225 goto qstats_done;
8226 }
8227 for (j = 0; j < bp->max_q; j++) {
8228 if (bp->q_ids[j] == queue_id)
8229 bp->pri2cos_idx[i] = queue_idx;
8230 }
8231 }
8232 bp->pri2cos_valid = 1;
8233 }
8234 qstats_done:
8235 mutex_unlock(&bp->hwrm_cmd_lock);
8236 return rc;
8237 }
8238
8239 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8240 {
8241 if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
8242 bnxt_hwrm_tunnel_dst_port_free(
8243 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8244 if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
8245 bnxt_hwrm_tunnel_dst_port_free(
8246 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8247 }
8248
8249 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8250 {
8251 int rc, i;
8252 u32 tpa_flags = 0;
8253
8254 if (set_tpa)
8255 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8256 else if (BNXT_NO_FW_ACCESS(bp))
8257 return 0;
8258 for (i = 0; i < bp->nr_vnics; i++) {
8259 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8260 if (rc) {
8261 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8262 i, rc);
8263 return rc;
8264 }
8265 }
8266 return 0;
8267 }
8268
8269 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8270 {
8271 int i;
8272
8273 for (i = 0; i < bp->nr_vnics; i++)
8274 bnxt_hwrm_vnic_set_rss(bp, i, false);
8275 }
8276
8277 static void bnxt_clear_vnic(struct bnxt *bp)
8278 {
8279 if (!bp->vnic_info)
8280 return;
8281
8282 bnxt_hwrm_clear_vnic_filter(bp);
8283 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8284 /* clear all RSS settings before freeing the vnic ctx */
8285 bnxt_hwrm_clear_vnic_rss(bp);
8286 bnxt_hwrm_vnic_ctx_free(bp);
8287 }
8288 /* before freeing the vnic, undo the vnic TPA settings */
8289 if (bp->flags & BNXT_FLAG_TPA)
8290 bnxt_set_tpa(bp, false);
8291 bnxt_hwrm_vnic_free(bp);
8292 if (bp->flags & BNXT_FLAG_CHIP_P5)
8293 bnxt_hwrm_vnic_ctx_free(bp);
8294 }
8295
8296 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8297 bool irq_re_init)
8298 {
8299 bnxt_clear_vnic(bp);
8300 bnxt_hwrm_ring_free(bp, close_path);
8301 bnxt_hwrm_ring_grp_free(bp);
8302 if (irq_re_init) {
8303 bnxt_hwrm_stat_ctx_free(bp);
8304 bnxt_hwrm_free_tunnel_ports(bp);
8305 }
8306 }
8307
8308 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8309 {
8310 struct hwrm_func_cfg_input req = {0};
8311
8312 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8313 req.fid = cpu_to_le16(0xffff);
8314 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8315 if (br_mode == BRIDGE_MODE_VEB)
8316 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8317 else if (br_mode == BRIDGE_MODE_VEPA)
8318 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8319 else
8320 return -EINVAL;
8321 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8322 }
8323
8324 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8325 {
8326 struct hwrm_func_cfg_input req = {0};
8327
8328 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8329 return 0;
8330
8331 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8332 req.fid = cpu_to_le16(0xffff);
8333 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8334 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8335 if (size == 128)
8336 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8337
8338 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8339 }
8340
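/* Set up one VNIC on non-P5 chips: allocate its RSS context(s), apply
 * the VNIC/ring group configuration, enable RSS hashing and, when
 * aggregation rings are used, header-data split.
 */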
8341 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8342 {
8343 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8344 int rc;
8345
8346 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8347 goto skip_rss_ctx;
8348
8349 /* allocate context for vnic */
8350 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8351 if (rc) {
8352 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8353 vnic_id, rc);
8354 goto vnic_setup_err;
8355 }
8356 bp->rsscos_nr_ctxs++;
8357
8358 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8359 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8360 if (rc) {
8361 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8362 vnic_id, rc);
8363 goto vnic_setup_err;
8364 }
8365 bp->rsscos_nr_ctxs++;
8366 }
8367
8368 skip_rss_ctx:
8369 /* configure default vnic, ring grp */
8370 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8371 if (rc) {
8372 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8373 vnic_id, rc);
8374 goto vnic_setup_err;
8375 }
8376
8377 /* Enable RSS hashing on vnic */
8378 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8379 if (rc) {
8380 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8381 vnic_id, rc);
8382 goto vnic_setup_err;
8383 }
8384
8385 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8386 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8387 if (rc) {
8388 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8389 vnic_id, rc);
8390 }
8391 }
8392
8393 vnic_setup_err:
8394 return rc;
8395 }
8396
8397 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8398 {
8399 int rc, i, nr_ctxs;
8400
8401 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8402 for (i = 0; i < nr_ctxs; i++) {
8403 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8404 if (rc) {
8405 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8406 vnic_id, i, rc);
8407 break;
8408 }
8409 bp->rsscos_nr_ctxs++;
8410 }
8411 if (i < nr_ctxs)
8412 return -ENOMEM;
8413
8414 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8415 if (rc) {
8416 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8417 vnic_id, rc);
8418 return rc;
8419 }
8420 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8421 if (rc) {
8422 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8423 vnic_id, rc);
8424 return rc;
8425 }
8426 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8427 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8428 if (rc) {
8429 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8430 vnic_id, rc);
8431 }
8432 }
8433 return rc;
8434 }
8435
8436 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8437 {
8438 if (bp->flags & BNXT_FLAG_CHIP_P5)
8439 return __bnxt_setup_vnic_p5(bp, vnic_id);
8440 else
8441 return __bnxt_setup_vnic(bp, vnic_id);
8442 }
8443
8444 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8445 {
8446 #ifdef CONFIG_RFS_ACCEL
8447 int i, rc = 0;
8448
8449 if (bp->flags & BNXT_FLAG_CHIP_P5)
8450 return 0;
8451
8452 for (i = 0; i < bp->rx_nr_rings; i++) {
8453 struct bnxt_vnic_info *vnic;
8454 u16 vnic_id = i + 1;
8455 u16 ring_id = i;
8456
8457 if (vnic_id >= bp->nr_vnics)
8458 break;
8459
8460 vnic = &bp->vnic_info[vnic_id];
8461 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8462 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8463 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8464 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8465 if (rc) {
8466 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8467 vnic_id, rc);
8468 break;
8469 }
8470 rc = bnxt_setup_vnic(bp, vnic_id);
8471 if (rc)
8472 break;
8473 }
8474 return rc;
8475 #else
8476 return 0;
8477 #endif
8478 }
8479
8480 /* Allow the PF, trusted VFs and VFs with a default VLAN to be in promiscuous mode */
8481 static bool bnxt_promisc_ok(struct bnxt *bp)
8482 {
8483 #ifdef CONFIG_BNXT_SRIOV
8484 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8485 return false;
8486 #endif
8487 return true;
8488 }
8489
8490 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8491 {
8492 unsigned int rc = 0;
8493
8494 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8495 if (rc) {
8496 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8497 rc);
8498 return rc;
8499 }
8500
8501 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8502 if (rc) {
8503 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8504 rc);
8505 return rc;
8506 }
8507 return rc;
8508 }
8509
8510 static int bnxt_cfg_rx_mode(struct bnxt *);
8511 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8512
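/* Bring up the data path with firmware: allocate stat contexts, rings,
 * ring groups and the default VNIC, set up RSS/TPA/RFS, program the MAC
 * filter and RX mask for VNIC 0, and configure interrupt coalescing.
 * All firmware resources are freed again on error.
 */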
8513 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8514 {
8515 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8516 int rc = 0;
8517 unsigned int rx_nr_rings = bp->rx_nr_rings;
8518
8519 if (irq_re_init) {
8520 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8521 if (rc) {
8522 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8523 rc);
8524 goto err_out;
8525 }
8526 }
8527
8528 rc = bnxt_hwrm_ring_alloc(bp);
8529 if (rc) {
8530 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8531 goto err_out;
8532 }
8533
8534 rc = bnxt_hwrm_ring_grp_alloc(bp);
8535 if (rc) {
8536 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8537 goto err_out;
8538 }
8539
8540 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8541 rx_nr_rings--;
8542
8543 /* default vnic 0 */
8544 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8545 if (rc) {
8546 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8547 goto err_out;
8548 }
8549
8550 rc = bnxt_setup_vnic(bp, 0);
8551 if (rc)
8552 goto err_out;
8553
8554 if (bp->flags & BNXT_FLAG_RFS) {
8555 rc = bnxt_alloc_rfs_vnics(bp);
8556 if (rc)
8557 goto err_out;
8558 }
8559
8560 if (bp->flags & BNXT_FLAG_TPA) {
8561 rc = bnxt_set_tpa(bp, true);
8562 if (rc)
8563 goto err_out;
8564 }
8565
8566 if (BNXT_VF(bp))
8567 bnxt_update_vf_mac(bp);
8568
8569 /* Filter for default vnic 0 */
8570 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8571 if (rc) {
8572 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8573 goto err_out;
8574 }
8575 vnic->uc_filter_count = 1;
8576
8577 vnic->rx_mask = 0;
8578 if (bp->dev->flags & IFF_BROADCAST)
8579 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8580
8581 if (bp->dev->flags & IFF_PROMISC)
8582 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8583
8584 if (bp->dev->flags & IFF_ALLMULTI) {
8585 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8586 vnic->mc_list_count = 0;
8587 } else {
8588 u32 mask = 0;
8589
8590 bnxt_mc_list_updated(bp, &mask);
8591 vnic->rx_mask |= mask;
8592 }
8593
8594 rc = bnxt_cfg_rx_mode(bp);
8595 if (rc)
8596 goto err_out;
8597
8598 rc = bnxt_hwrm_set_coal(bp);
8599 if (rc)
8600 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8601 rc);
8602
8603 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8604 rc = bnxt_setup_nitroa0_vnic(bp);
8605 if (rc)
8606 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8607 rc);
8608 }
8609
8610 if (BNXT_VF(bp)) {
8611 bnxt_hwrm_func_qcfg(bp);
8612 netdev_update_features(bp->dev);
8613 }
8614
8615 return 0;
8616
8617 err_out:
8618 bnxt_hwrm_resource_free(bp, 0, true);
8619
8620 return rc;
8621 }
8622
8623 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8624 {
8625 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8626 return 0;
8627 }
8628
8629 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8630 {
8631 bnxt_init_cp_rings(bp);
8632 bnxt_init_rx_rings(bp);
8633 bnxt_init_tx_rings(bp);
8634 bnxt_init_ring_grps(bp, irq_re_init);
8635 bnxt_init_vnics(bp);
8636
8637 return bnxt_init_chip(bp, irq_re_init);
8638 }
8639
8640 static int bnxt_set_real_num_queues(struct bnxt *bp)
8641 {
8642 int rc;
8643 struct net_device *dev = bp->dev;
8644
8645 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8646 bp->tx_nr_rings_xdp);
8647 if (rc)
8648 return rc;
8649
8650 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8651 if (rc)
8652 return rc;
8653
8654 #ifdef CONFIG_RFS_ACCEL
8655 if (bp->flags & BNXT_FLAG_RFS)
8656 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8657 #endif
8658
8659 return rc;
8660 }
8661
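/* Reduce the requested RX/TX ring counts so they fit within @max: with
 * shared rings each count is simply capped at @max, otherwise rings are
 * taken away from the larger of the two counts until rx + tx fits.
 */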
8662 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8663 bool shared)
8664 {
8665 int _rx = *rx, _tx = *tx;
8666
8667 if (shared) {
8668 *rx = min_t(int, _rx, max);
8669 *tx = min_t(int, _tx, max);
8670 } else {
8671 if (max < 2)
8672 return -ENOMEM;
8673
8674 while (_rx + _tx > max) {
8675 if (_rx > _tx && _rx > 1)
8676 _rx--;
8677 else if (_tx > 1)
8678 _tx--;
8679 }
8680 *rx = _rx;
8681 *tx = _tx;
8682 }
8683 return 0;
8684 }
8685
8686 static void bnxt_setup_msix(struct bnxt *bp)
8687 {
8688 const int len = sizeof(bp->irq_tbl[0].name);
8689 struct net_device *dev = bp->dev;
8690 int tcs, i;
8691
8692 tcs = netdev_get_num_tc(dev);
8693 if (tcs) {
8694 int i, off, count;
8695
8696 for (i = 0; i < tcs; i++) {
8697 count = bp->tx_nr_rings_per_tc;
8698 off = i * count;
8699 netdev_set_tc_queue(dev, i, count, off);
8700 }
8701 }
8702
8703 for (i = 0; i < bp->cp_nr_rings; i++) {
8704 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8705 char *attr;
8706
8707 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8708 attr = "TxRx";
8709 else if (i < bp->rx_nr_rings)
8710 attr = "rx";
8711 else
8712 attr = "tx";
8713
8714 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8715 attr, i);
8716 bp->irq_tbl[map_idx].handler = bnxt_msix;
8717 }
8718 }
8719
8720 static void bnxt_setup_inta(struct bnxt *bp)
8721 {
8722 const int len = sizeof(bp->irq_tbl[0].name);
8723
8724 if (netdev_get_num_tc(bp->dev))
8725 netdev_reset_tc(bp->dev);
8726
8727 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8728 0);
8729 bp->irq_tbl[0].handler = bnxt_inta;
8730 }
8731
8732 static int bnxt_init_int_mode(struct bnxt *bp);
8733
8734 static int bnxt_setup_int_mode(struct bnxt *bp)
8735 {
8736 int rc;
8737
8738 if (!bp->irq_tbl) {
8739 rc = bnxt_init_int_mode(bp);
8740 if (rc || !bp->irq_tbl)
8741 return rc ?: -ENODEV;
8742 }
8743
8744 if (bp->flags & BNXT_FLAG_USING_MSIX)
8745 bnxt_setup_msix(bp);
8746 else
8747 bnxt_setup_inta(bp);
8748
8749 rc = bnxt_set_real_num_queues(bp);
8750 return rc;
8751 }
8752
8753 #ifdef CONFIG_RFS_ACCEL
8754 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8755 {
8756 return bp->hw_resc.max_rsscos_ctxs;
8757 }
8758
8759 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8760 {
8761 return bp->hw_resc.max_vnics;
8762 }
8763 #endif
8764
8765 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8766 {
8767 return bp->hw_resc.max_stat_ctxs;
8768 }
8769
8770 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8771 {
8772 return bp->hw_resc.max_cp_rings;
8773 }
8774
8775 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8776 {
8777 unsigned int cp = bp->hw_resc.max_cp_rings;
8778
8779 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8780 cp -= bnxt_get_ulp_msix_num(bp);
8781
8782 return cp;
8783 }
8784
8785 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8786 {
8787 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8788
8789 if (bp->flags & BNXT_FLAG_CHIP_P5)
8790 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8791
8792 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8793 }
8794
8795 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8796 {
8797 bp->hw_resc.max_irqs = max_irqs;
8798 }
8799
8800 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8801 {
8802 unsigned int cp;
8803
8804 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8805 if (bp->flags & BNXT_FLAG_CHIP_P5)
8806 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8807 else
8808 return cp - bp->cp_nr_rings;
8809 }
8810
8811 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8812 {
8813 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8814 }
8815
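/* Return how many MSI-X vectors beyond those used by the L2 rings can be
 * made available, for example for a ULP such as the RDMA driver.
 */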
8816 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8817 {
8818 int max_cp = bnxt_get_max_func_cp_rings(bp);
8819 int max_irq = bnxt_get_max_func_irqs(bp);
8820 int total_req = bp->cp_nr_rings + num;
8821 int max_idx, avail_msix;
8822
8823 max_idx = bp->total_irqs;
8824 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8825 max_idx = min_t(int, bp->total_irqs, max_cp);
8826 avail_msix = max_idx - bp->cp_nr_rings;
8827 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8828 return avail_msix;
8829
8830 if (max_irq < total_req) {
8831 num = max_irq - bp->cp_nr_rings;
8832 if (num <= 0)
8833 return 0;
8834 }
8835 return num;
8836 }
8837
8838 static int bnxt_get_num_msix(struct bnxt *bp)
8839 {
8840 if (!BNXT_NEW_RM(bp))
8841 return bnxt_get_max_func_irqs(bp);
8842
8843 return bnxt_nq_rings_in_use(bp);
8844 }
8845
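/* Enable MSI-X: size the vector table from the ring requirements,
 * allocate bp->irq_tbl, and trim the RX/TX ring counts to match the
 * vectors actually granted by the PCI core.
 */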
8846 static int bnxt_init_msix(struct bnxt *bp)
8847 {
8848 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8849 struct msix_entry *msix_ent;
8850
8851 total_vecs = bnxt_get_num_msix(bp);
8852 max = bnxt_get_max_func_irqs(bp);
8853 if (total_vecs > max)
8854 total_vecs = max;
8855
8856 if (!total_vecs)
8857 return 0;
8858
8859 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8860 if (!msix_ent)
8861 return -ENOMEM;
8862
8863 for (i = 0; i < total_vecs; i++) {
8864 msix_ent[i].entry = i;
8865 msix_ent[i].vector = 0;
8866 }
8867
8868 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8869 min = 2;
8870
8871 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8872 ulp_msix = bnxt_get_ulp_msix_num(bp);
8873 if (total_vecs < 0 || total_vecs < ulp_msix) {
8874 rc = -ENODEV;
8875 goto msix_setup_exit;
8876 }
8877
8878 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8879 if (bp->irq_tbl) {
8880 for (i = 0; i < total_vecs; i++)
8881 bp->irq_tbl[i].vector = msix_ent[i].vector;
8882
8883 bp->total_irqs = total_vecs;
8884 /* Trim rings based on the number of vectors allocated */
8885 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8886 total_vecs - ulp_msix, min == 1);
8887 if (rc)
8888 goto msix_setup_exit;
8889
8890 bp->cp_nr_rings = (min == 1) ?
8891 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8892 bp->tx_nr_rings + bp->rx_nr_rings;
8893
8894 } else {
8895 rc = -ENOMEM;
8896 goto msix_setup_exit;
8897 }
8898 bp->flags |= BNXT_FLAG_USING_MSIX;
8899 kfree(msix_ent);
8900 return 0;
8901
8902 msix_setup_exit:
8903 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8904 kfree(bp->irq_tbl);
8905 bp->irq_tbl = NULL;
8906 pci_disable_msix(bp->pdev);
8907 kfree(msix_ent);
8908 return rc;
8909 }
8910
8911 static int bnxt_init_inta(struct bnxt *bp)
8912 {
8913 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
8914 if (!bp->irq_tbl)
8915 return -ENOMEM;
8916
8917 bp->total_irqs = 1;
8918 bp->rx_nr_rings = 1;
8919 bp->tx_nr_rings = 1;
8920 bp->cp_nr_rings = 1;
8921 bp->flags |= BNXT_FLAG_SHARED_RINGS;
8922 bp->irq_tbl[0].vector = bp->pdev->irq;
8923 return 0;
8924 }
8925
8926 static int bnxt_init_int_mode(struct bnxt *bp)
8927 {
8928 int rc = -ENODEV;
8929
8930 if (bp->flags & BNXT_FLAG_MSIX_CAP)
8931 rc = bnxt_init_msix(bp);
8932
8933 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8934 /* fallback to INTA */
8935 rc = bnxt_init_inta(bp);
8936 }
8937 return rc;
8938 }
8939
8940 static void bnxt_clear_int_mode(struct bnxt *bp)
8941 {
8942 if (bp->flags & BNXT_FLAG_USING_MSIX)
8943 pci_disable_msix(bp->pdev);
8944
8945 kfree(bp->irq_tbl);
8946 bp->irq_tbl = NULL;
8947 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8948 }
8949
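/* Reserve rings with the firmware resource manager, re-initializing the
 * interrupt mode first when the required MSI-X count has changed, and
 * verify that the TX rings still divide evenly among the configured TCs.
 */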
8950 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8951 {
8952 int tcs = netdev_get_num_tc(bp->dev);
8953 bool irq_cleared = false;
8954 int rc;
8955
8956 if (!bnxt_need_reserve_rings(bp))
8957 return 0;
8958
8959 if (irq_re_init && BNXT_NEW_RM(bp) &&
8960 bnxt_get_num_msix(bp) != bp->total_irqs) {
8961 bnxt_ulp_irq_stop(bp);
8962 bnxt_clear_int_mode(bp);
8963 irq_cleared = true;
8964 }
8965 rc = __bnxt_reserve_rings(bp);
8966 if (irq_cleared) {
8967 if (!rc)
8968 rc = bnxt_init_int_mode(bp);
8969 bnxt_ulp_irq_restart(bp, rc);
8970 }
8971 if (rc) {
8972 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8973 return rc;
8974 }
8975 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8976 netdev_err(bp->dev, "tx ring reservation failure\n");
8977 netdev_reset_tc(bp->dev);
8978 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8979 return -ENOMEM;
8980 }
8981 return 0;
8982 }
8983
8984 static void bnxt_free_irq(struct bnxt *bp)
8985 {
8986 struct bnxt_irq *irq;
8987 int i;
8988
8989 #ifdef CONFIG_RFS_ACCEL
8990 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8991 bp->dev->rx_cpu_rmap = NULL;
8992 #endif
8993 if (!bp->irq_tbl || !bp->bnapi)
8994 return;
8995
8996 for (i = 0; i < bp->cp_nr_rings; i++) {
8997 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8998
8999 irq = &bp->irq_tbl[map_idx];
9000 if (irq->requested) {
9001 if (irq->have_cpumask) {
9002 irq_set_affinity_hint(irq->vector, NULL);
9003 free_cpumask_var(irq->cpu_mask);
9004 irq->have_cpumask = 0;
9005 }
9006 free_irq(irq->vector, bp->bnapi[i]);
9007 }
9008
9009 irq->requested = 0;
9010 }
9011 }
9012
9013 static int bnxt_request_irq(struct bnxt *bp)
9014 {
9015 int i, j, rc = 0;
9016 unsigned long flags = 0;
9017 #ifdef CONFIG_RFS_ACCEL
9018 struct cpu_rmap *rmap;
9019 #endif
9020
9021 rc = bnxt_setup_int_mode(bp);
9022 if (rc) {
9023 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9024 rc);
9025 return rc;
9026 }
9027 #ifdef CONFIG_RFS_ACCEL
9028 rmap = bp->dev->rx_cpu_rmap;
9029 #endif
9030 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9031 flags = IRQF_SHARED;
9032
9033 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9034 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9035 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9036
9037 #ifdef CONFIG_RFS_ACCEL
9038 if (rmap && bp->bnapi[i]->rx_ring) {
9039 rc = irq_cpu_rmap_add(rmap, irq->vector);
9040 if (rc)
9041 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9042 j);
9043 j++;
9044 }
9045 #endif
9046 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9047 bp->bnapi[i]);
9048 if (rc)
9049 break;
9050
9051 irq->requested = 1;
9052
9053 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9054 int numa_node = dev_to_node(&bp->pdev->dev);
9055
9056 irq->have_cpumask = 1;
9057 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9058 irq->cpu_mask);
9059 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9060 if (rc) {
9061 netdev_warn(bp->dev,
9062 "Set affinity failed, IRQ = %d\n",
9063 irq->vector);
9064 break;
9065 }
9066 }
9067 }
9068 return rc;
9069 }
9070
9071 static void bnxt_del_napi(struct bnxt *bp)
9072 {
9073 int i;
9074
9075 if (!bp->bnapi)
9076 return;
9077
9078 for (i = 0; i < bp->cp_nr_rings; i++) {
9079 struct bnxt_napi *bnapi = bp->bnapi[i];
9080
9081 __netif_napi_del(&bnapi->napi);
9082 }
9083 /* We called __netif_napi_del(), so we need to respect an RCU grace
9084 * period before freeing the napi structures.
9085 */
9086 synchronize_net();
9087 }
9088
9089 static void bnxt_init_napi(struct bnxt *bp)
9090 {
9091 int i;
9092 unsigned int cp_nr_rings = bp->cp_nr_rings;
9093 struct bnxt_napi *bnapi;
9094
9095 if (bp->flags & BNXT_FLAG_USING_MSIX) {
9096 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9097
9098 if (bp->flags & BNXT_FLAG_CHIP_P5)
9099 poll_fn = bnxt_poll_p5;
9100 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9101 cp_nr_rings--;
9102 for (i = 0; i < cp_nr_rings; i++) {
9103 bnapi = bp->bnapi[i];
9104 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
9105 }
9106 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9107 bnapi = bp->bnapi[cp_nr_rings];
9108 netif_napi_add(bp->dev, &bnapi->napi,
9109 bnxt_poll_nitroa0, 64);
9110 }
9111 } else {
9112 bnapi = bp->bnapi[0];
9113 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9114 }
9115 }
9116
9117 static void bnxt_disable_napi(struct bnxt *bp)
9118 {
9119 int i;
9120
9121 if (!bp->bnapi ||
9122 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9123 return;
9124
9125 for (i = 0; i < bp->cp_nr_rings; i++) {
9126 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9127
9128 if (bp->bnapi[i]->rx_ring)
9129 cancel_work_sync(&cpr->dim.work);
9130
9131 napi_disable(&bp->bnapi[i]->napi);
9132 }
9133 }
9134
9135 static void bnxt_enable_napi(struct bnxt *bp)
9136 {
9137 int i;
9138
9139 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9140 for (i = 0; i < bp->cp_nr_rings; i++) {
9141 struct bnxt_napi *bnapi = bp->bnapi[i];
9142 struct bnxt_cp_ring_info *cpr;
9143
9144 cpr = &bnapi->cp_ring;
9145 if (bnapi->in_reset)
9146 cpr->sw_stats.rx.rx_resets++;
9147 bnapi->in_reset = false;
9148
9149 if (bnapi->rx_ring) {
9150 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9151 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9152 }
9153 napi_enable(&bnapi->napi);
9154 }
9155 }
9156
9157 void bnxt_tx_disable(struct bnxt *bp)
9158 {
9159 int i;
9160 struct bnxt_tx_ring_info *txr;
9161
9162 if (bp->tx_ring) {
9163 for (i = 0; i < bp->tx_nr_rings; i++) {
9164 txr = &bp->tx_ring[i];
9165 txr->dev_state = BNXT_DEV_STATE_CLOSING;
9166 }
9167 }
9168 /* Drop carrier first to prevent TX timeout */
9169 netif_carrier_off(bp->dev);
9170 /* Stop all TX queues */
9171 netif_tx_disable(bp->dev);
9172 }
9173
9174 void bnxt_tx_enable(struct bnxt *bp)
9175 {
9176 int i;
9177 struct bnxt_tx_ring_info *txr;
9178
9179 for (i = 0; i < bp->tx_nr_rings; i++) {
9180 txr = &bp->tx_ring[i];
9181 txr->dev_state = 0;
9182 }
9183 netif_tx_wake_all_queues(bp->dev);
9184 if (bp->link_info.link_up)
9185 netif_carrier_on(bp->dev);
9186 }
9187
9188 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9189 {
9190 u8 active_fec = link_info->active_fec_sig_mode &
9191 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9192
9193 switch (active_fec) {
9194 default:
9195 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9196 return "None";
9197 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9198 return "Clause 74 BaseR";
9199 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9200 return "Clause 91 RS(528,514)";
9201 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9202 return "Clause 91 RS544_1XN";
9203 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9204 return "Clause 91 RS(544,514)";
9205 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9206 return "Clause 91 RS272_1XN";
9207 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9208 return "Clause 91 RS(272,257)";
9209 }
9210 }
9211
9212 static void bnxt_report_link(struct bnxt *bp)
9213 {
9214 if (bp->link_info.link_up) {
9215 const char *signal = "";
9216 const char *flow_ctrl;
9217 const char *duplex;
9218 u32 speed;
9219 u16 fec;
9220
9221 netif_carrier_on(bp->dev);
9222 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9223 if (speed == SPEED_UNKNOWN) {
9224 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9225 return;
9226 }
9227 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9228 duplex = "full";
9229 else
9230 duplex = "half";
9231 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9232 flow_ctrl = "ON - receive & transmit";
9233 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9234 flow_ctrl = "ON - transmit";
9235 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9236 flow_ctrl = "ON - receive";
9237 else
9238 flow_ctrl = "none";
9239 if (bp->link_info.phy_qcfg_resp.option_flags &
9240 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9241 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9242 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9243 switch (sig_mode) {
9244 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9245 signal = "(NRZ) ";
9246 break;
9247 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9248 signal = "(PAM4) ";
9249 break;
9250 default:
9251 break;
9252 }
9253 }
9254 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9255 speed, signal, duplex, flow_ctrl);
9256 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9257 netdev_info(bp->dev, "EEE is %s\n",
9258 bp->eee.eee_active ? "active" :
9259 "not active");
9260 fec = bp->link_info.fec_cfg;
9261 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9262 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9263 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9264 bnxt_report_fec(&bp->link_info));
9265 } else {
9266 netif_carrier_off(bp->dev);
9267 netdev_err(bp->dev, "NIC Link is Down\n");
9268 }
9269 }
9270
9271 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9272 {
9273 if (!resp->supported_speeds_auto_mode &&
9274 !resp->supported_speeds_force_mode &&
9275 !resp->supported_pam4_speeds_auto_mode &&
9276 !resp->supported_pam4_speeds_force_mode)
9277 return true;
9278 return false;
9279 }
9280
9281 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9282 {
9283 int rc = 0;
9284 struct hwrm_port_phy_qcaps_input req = {0};
9285 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9286 struct bnxt_link_info *link_info = &bp->link_info;
9287
9288 if (bp->hwrm_spec_code < 0x10201)
9289 return 0;
9290
9291 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
9292
9293 mutex_lock(&bp->hwrm_cmd_lock);
9294 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9295 if (rc)
9296 goto hwrm_phy_qcaps_exit;
9297
9298 bp->phy_flags = resp->flags;
9299 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9300 struct ethtool_eee *eee = &bp->eee;
9301 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9302
9303 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9304 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9305 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9306 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9307 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9308 }
9309
9310 if (bp->hwrm_spec_code >= 0x10a01) {
9311 if (bnxt_phy_qcaps_no_speed(resp)) {
9312 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9313 netdev_warn(bp->dev, "Ethernet link disabled\n");
9314 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9315 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9316 netdev_info(bp->dev, "Ethernet link enabled\n");
9317 /* Phy re-enabled, reprobe the speeds */
9318 link_info->support_auto_speeds = 0;
9319 link_info->support_pam4_auto_speeds = 0;
9320 }
9321 }
9322 if (resp->supported_speeds_auto_mode)
9323 link_info->support_auto_speeds =
9324 le16_to_cpu(resp->supported_speeds_auto_mode);
9325 if (resp->supported_pam4_speeds_auto_mode)
9326 link_info->support_pam4_auto_speeds =
9327 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9328
9329 bp->port_count = resp->port_cnt;
9330
9331 hwrm_phy_qcaps_exit:
9332 mutex_unlock(&bp->hwrm_cmd_lock);
9333 return rc;
9334 }
9335
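/* Return true if @advertising contains any speed bit that is no longer
 * present in @supported.
 */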
9336 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9337 {
9338 u16 diff = advertising ^ supported;
9339
9340 return ((supported | diff) != supported);
9341 }
9342
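/* Query PORT_PHY_QCFG and refresh the cached link_info. When
 * @chng_link_state is set, update link_up and report any link change;
 * advertised speeds are also trimmed if firmware no longer supports them.
 */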
9343 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9344 {
9345 int rc = 0;
9346 struct bnxt_link_info *link_info = &bp->link_info;
9347 struct hwrm_port_phy_qcfg_input req = {0};
9348 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9349 u8 link_up = link_info->link_up;
9350 bool support_changed = false;
9351
9352 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
9353
9354 mutex_lock(&bp->hwrm_cmd_lock);
9355 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9356 if (rc) {
9357 mutex_unlock(&bp->hwrm_cmd_lock);
9358 return rc;
9359 }
9360
9361 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9362 link_info->phy_link_status = resp->link;
9363 link_info->duplex = resp->duplex_cfg;
9364 if (bp->hwrm_spec_code >= 0x10800)
9365 link_info->duplex = resp->duplex_state;
9366 link_info->pause = resp->pause;
9367 link_info->auto_mode = resp->auto_mode;
9368 link_info->auto_pause_setting = resp->auto_pause;
9369 link_info->lp_pause = resp->link_partner_adv_pause;
9370 link_info->force_pause_setting = resp->force_pause;
9371 link_info->duplex_setting = resp->duplex_cfg;
9372 if (link_info->phy_link_status == BNXT_LINK_LINK)
9373 link_info->link_speed = le16_to_cpu(resp->link_speed);
9374 else
9375 link_info->link_speed = 0;
9376 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9377 link_info->force_pam4_link_speed =
9378 le16_to_cpu(resp->force_pam4_link_speed);
9379 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9380 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9381 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9382 link_info->auto_pam4_link_speeds =
9383 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9384 link_info->lp_auto_link_speeds =
9385 le16_to_cpu(resp->link_partner_adv_speeds);
9386 link_info->lp_auto_pam4_link_speeds =
9387 resp->link_partner_pam4_adv_speeds;
9388 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9389 link_info->phy_ver[0] = resp->phy_maj;
9390 link_info->phy_ver[1] = resp->phy_min;
9391 link_info->phy_ver[2] = resp->phy_bld;
9392 link_info->media_type = resp->media_type;
9393 link_info->phy_type = resp->phy_type;
9394 link_info->transceiver = resp->xcvr_pkg_type;
9395 link_info->phy_addr = resp->eee_config_phy_addr &
9396 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9397 link_info->module_status = resp->module_status;
9398
9399 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9400 struct ethtool_eee *eee = &bp->eee;
9401 u16 fw_speeds;
9402
9403 eee->eee_active = 0;
9404 if (resp->eee_config_phy_addr &
9405 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9406 eee->eee_active = 1;
9407 fw_speeds = le16_to_cpu(
9408 resp->link_partner_adv_eee_link_speed_mask);
9409 eee->lp_advertised =
9410 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9411 }
9412
9413 /* Pull initial EEE config */
9414 if (!chng_link_state) {
9415 if (resp->eee_config_phy_addr &
9416 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9417 eee->eee_enabled = 1;
9418
9419 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9420 eee->advertised =
9421 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9422
9423 if (resp->eee_config_phy_addr &
9424 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9425 __le32 tmr;
9426
9427 eee->tx_lpi_enabled = 1;
9428 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9429 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9430 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9431 }
9432 }
9433 }
9434
9435 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9436 if (bp->hwrm_spec_code >= 0x10504) {
9437 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9438 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9439 }
9440 /* TODO: need to add more logic to report VF link */
9441 if (chng_link_state) {
9442 if (link_info->phy_link_status == BNXT_LINK_LINK)
9443 link_info->link_up = 1;
9444 else
9445 link_info->link_up = 0;
9446 if (link_up != link_info->link_up)
9447 bnxt_report_link(bp);
9448 } else {
9449 /* always link down if not required to update the link state */
9450 link_info->link_up = 0;
9451 }
9452 mutex_unlock(&bp->hwrm_cmd_lock);
9453
9454 if (!BNXT_PHY_CFG_ABLE(bp))
9455 return 0;
9456
9457 /* Check if any advertised speeds are no longer supported. The caller
9458 * holds the link_lock mutex, so we can modify link_info settings.
9459 */
9460 if (bnxt_support_dropped(link_info->advertising,
9461 link_info->support_auto_speeds)) {
9462 link_info->advertising = link_info->support_auto_speeds;
9463 support_changed = true;
9464 }
9465 if (bnxt_support_dropped(link_info->advertising_pam4,
9466 link_info->support_pam4_auto_speeds)) {
9467 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9468 support_changed = true;
9469 }
9470 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9471 bnxt_hwrm_set_link_setting(bp, true, false);
9472 return 0;
9473 }
9474
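/* Refresh the link state and warn if an unqualified SFP+ module has been
 * detected (TX disabled, module powered down, or warning reported).
 */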
9475 static void bnxt_get_port_module_status(struct bnxt *bp)
9476 {
9477 struct bnxt_link_info *link_info = &bp->link_info;
9478 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9479 u8 module_status;
9480
9481 if (bnxt_update_link(bp, true))
9482 return;
9483
9484 module_status = link_info->module_status;
9485 switch (module_status) {
9486 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9487 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9488 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9489 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9490 bp->pf.port_id);
9491 if (bp->hwrm_spec_code >= 0x10201) {
9492 netdev_warn(bp->dev, "Module part number %s\n",
9493 resp->phy_vendor_partnumber);
9494 }
9495 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9496 netdev_warn(bp->dev, "TX is disabled\n");
9497 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9498 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9499 }
9500 }
9501
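/* Fill in the pause (flow control) fields of a PORT_PHY_CFG request from
 * the requested autoneg or forced flow control settings.
 */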
9502 static void
9503 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9504 {
9505 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9506 if (bp->hwrm_spec_code >= 0x10201)
9507 req->auto_pause =
9508 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9509 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9510 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9511 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9512 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9513 req->enables |=
9514 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9515 } else {
9516 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9517 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9518 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9519 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9520 req->enables |=
9521 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9522 if (bp->hwrm_spec_code >= 0x10201) {
9523 req->auto_pause = req->force_pause;
9524 req->enables |= cpu_to_le32(
9525 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9526 }
9527 }
9528 }
9529
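/* Fill in the speed fields of a PORT_PHY_CFG request: the advertised
 * NRZ/PAM4 masks when autoneg is enabled, or a forced speed otherwise.
 */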
9530 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9531 {
9532 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9533 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9534 if (bp->link_info.advertising) {
9535 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9536 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9537 }
9538 if (bp->link_info.advertising_pam4) {
9539 req->enables |=
9540 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9541 req->auto_link_pam4_speed_mask =
9542 cpu_to_le16(bp->link_info.advertising_pam4);
9543 }
9544 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9545 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9546 } else {
9547 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9548 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9549 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9550 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9551 } else {
9552 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9553 }
9554 }
9555
9556 /* tell chimp that the setting takes effect immediately */
9557 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9558 }
9559
9560 int bnxt_hwrm_set_pause(struct bnxt *bp)
9561 {
9562 struct hwrm_port_phy_cfg_input req = {0};
9563 int rc;
9564
9565 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9566 bnxt_hwrm_set_pause_common(bp, &req);
9567
9568 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9569 bp->link_info.force_link_chng)
9570 bnxt_hwrm_set_link_common(bp, &req);
9571
9572 mutex_lock(&bp->hwrm_cmd_lock);
9573 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9574 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9575 /* Since changing the pause setting doesn't trigger any link
9576 * change event, the driver needs to update the current pause
9577 * result upon successful return of the phy_cfg command.
9578 */
9579 bp->link_info.pause =
9580 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9581 bp->link_info.auto_pause_setting = 0;
9582 if (!bp->link_info.force_link_chng)
9583 bnxt_report_link(bp);
9584 }
9585 bp->link_info.force_link_chng = false;
9586 mutex_unlock(&bp->hwrm_cmd_lock);
9587 return rc;
9588 }
9589
9590 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9591 struct hwrm_port_phy_cfg_input *req)
9592 {
9593 struct ethtool_eee *eee = &bp->eee;
9594
9595 if (eee->eee_enabled) {
9596 u16 eee_speeds;
9597 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9598
9599 if (eee->tx_lpi_enabled)
9600 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9601 else
9602 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9603
9604 req->flags |= cpu_to_le32(flags);
9605 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9606 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9607 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9608 } else {
9609 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9610 }
9611 }
9612
9613 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9614 {
9615 struct hwrm_port_phy_cfg_input req = {0};
9616
9617 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9618 if (set_pause)
9619 bnxt_hwrm_set_pause_common(bp, &req);
9620
9621 bnxt_hwrm_set_link_common(bp, &req);
9622
9623 if (set_eee)
9624 bnxt_hwrm_set_eee(bp, &req);
9625 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9626 }
9627
9628 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9629 {
9630 struct hwrm_port_phy_cfg_input req = {0};
9631
9632 if (!BNXT_SINGLE_PF(bp))
9633 return 0;
9634
9635 if (pci_num_vf(bp->pdev) &&
9636 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9637 return 0;
9638
9639 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9640 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9641 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9642 }
9643
9644 static int bnxt_fw_init_one(struct bnxt *bp);
9645
9646 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9647 {
9648 #ifdef CONFIG_TEE_BNXT_FW
9649 int rc = tee_bnxt_fw_load();
9650
9651 if (rc)
9652 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9653
9654 return rc;
9655 #else
9656 netdev_err(bp->dev, "OP-TEE not supported\n");
9657 return -ENODEV;
9658 #endif
9659 }
9660
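/* Poll the firmware health status register while retrying VER_GET. If the
 * firmware has crashed with no master function, request a reset via OP-TEE.
 */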
9661 static int bnxt_try_recover_fw(struct bnxt *bp)
9662 {
9663 if (bp->fw_health && bp->fw_health->status_reliable) {
9664 int retry = 0, rc;
9665 u32 sts;
9666
9667 mutex_lock(&bp->hwrm_cmd_lock);
9668 do {
9669 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9670 rc = __bnxt_hwrm_ver_get(bp, true);
9671 if (!BNXT_FW_IS_BOOTING(sts) &&
9672 !BNXT_FW_IS_RECOVERING(sts))
9673 break;
9674 retry++;
9675 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9676 mutex_unlock(&bp->hwrm_cmd_lock);
9677
9678 if (!BNXT_FW_IS_HEALTHY(sts)) {
9679 netdev_err(bp->dev,
9680 "Firmware not responding, status: 0x%x\n",
9681 sts);
9682 rc = -ENODEV;
9683 }
9684 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9685 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9686 return bnxt_fw_reset_via_optee(bp);
9687 }
9688 return rc;
9689 }
9690
9691 return -ENODEV;
9692 }
9693
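/* Notify firmware that the driver interface is going up or down. On the up
 * path, detect a completed hot firmware reset or a resource change and
 * re-initialize firmware context and interrupts as needed.
 */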
9694 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9695 {
9696 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9697 struct hwrm_func_drv_if_change_input req = {0};
9698 bool fw_reset = !bp->irq_tbl;
9699 bool resc_reinit = false;
9700 int rc, retry = 0;
9701 u32 flags = 0;
9702
9703 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9704 return 0;
9705
9706 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9707 if (up)
9708 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9709 mutex_lock(&bp->hwrm_cmd_lock);
9710 while (retry < BNXT_FW_IF_RETRY) {
9711 rc = _hwrm_send_message(bp, &req, sizeof(req),
9712 HWRM_CMD_TIMEOUT);
9713 if (rc != -EAGAIN)
9714 break;
9715
9716 msleep(50);
9717 retry++;
9718 }
9719 if (!rc)
9720 flags = le32_to_cpu(resp->flags);
9721 mutex_unlock(&bp->hwrm_cmd_lock);
9722
9723 if (rc == -EAGAIN)
9724 return rc;
9725 if (rc && up) {
9726 rc = bnxt_try_recover_fw(bp);
9727 fw_reset = true;
9728 }
9729 if (rc)
9730 return rc;
9731
9732 if (!up) {
9733 bnxt_inv_fw_health_reg(bp);
9734 return 0;
9735 }
9736
9737 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9738 resc_reinit = true;
9739 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9740 fw_reset = true;
9741 else if (bp->fw_health && !bp->fw_health->status_reliable)
9742 bnxt_try_map_fw_health_reg(bp);
9743
9744 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9745 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9746 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9747 return -ENODEV;
9748 }
9749 if (resc_reinit || fw_reset) {
9750 if (fw_reset) {
9751 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9752 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9753 bnxt_ulp_stop(bp);
9754 bnxt_free_ctx_mem(bp);
9755 kfree(bp->ctx);
9756 bp->ctx = NULL;
9757 bnxt_dcb_free(bp);
9758 rc = bnxt_fw_init_one(bp);
9759 if (rc) {
9760 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9761 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9762 return rc;
9763 }
9764 bnxt_clear_int_mode(bp);
9765 rc = bnxt_init_int_mode(bp);
9766 if (rc) {
9767 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9768 netdev_err(bp->dev, "init int mode failed\n");
9769 return rc;
9770 }
9771 }
9772 if (BNXT_NEW_RM(bp)) {
9773 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9774
9775 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9776 if (rc)
9777 netdev_err(bp->dev, "resc_qcaps failed\n");
9778
9779 hw_resc->resv_cp_rings = 0;
9780 hw_resc->resv_stat_ctxs = 0;
9781 hw_resc->resv_irqs = 0;
9782 hw_resc->resv_tx_rings = 0;
9783 hw_resc->resv_rx_rings = 0;
9784 hw_resc->resv_hw_ring_grps = 0;
9785 hw_resc->resv_vnics = 0;
9786 if (!fw_reset) {
9787 bp->tx_nr_rings = 0;
9788 bp->rx_nr_rings = 0;
9789 }
9790 }
9791 }
9792 return rc;
9793 }
9794
9795 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9796 {
9797 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9798 struct hwrm_port_led_qcaps_input req = {0};
9799 struct bnxt_pf_info *pf = &bp->pf;
9800 int rc;
9801
9802 bp->num_leds = 0;
9803 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9804 return 0;
9805
9806 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9807 req.port_id = cpu_to_le16(pf->port_id);
9808 mutex_lock(&bp->hwrm_cmd_lock);
9809 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9810 if (rc) {
9811 mutex_unlock(&bp->hwrm_cmd_lock);
9812 return rc;
9813 }
9814 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9815 int i;
9816
9817 bp->num_leds = resp->num_leds;
9818 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9819 bp->num_leds);
9820 for (i = 0; i < bp->num_leds; i++) {
9821 struct bnxt_led_info *led = &bp->leds[i];
9822 __le16 caps = led->led_state_caps;
9823
9824 if (!led->led_group_id ||
9825 !BNXT_LED_ALT_BLINK_CAP(caps)) {
9826 bp->num_leds = 0;
9827 break;
9828 }
9829 }
9830 }
9831 mutex_unlock(&bp->hwrm_cmd_lock);
9832 return 0;
9833 }
9834
9835 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9836 {
9837 struct hwrm_wol_filter_alloc_input req = {0};
9838 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9839 int rc;
9840
9841 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9842 req.port_id = cpu_to_le16(bp->pf.port_id);
9843 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9844 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9845 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9846 mutex_lock(&bp->hwrm_cmd_lock);
9847 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9848 if (!rc)
9849 bp->wol_filter_id = resp->wol_filter_id;
9850 mutex_unlock(&bp->hwrm_cmd_lock);
9851 return rc;
9852 }
9853
9854 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9855 {
9856 struct hwrm_wol_filter_free_input req = {0};
9857
9858 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9859 req.port_id = cpu_to_le16(bp->pf.port_id);
9860 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9861 req.wol_filter_id = bp->wol_filter_id;
9862 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9863 }
9864
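/* Query one WoL filter at @handle; record a magic-packet filter if present
 * and return the next handle so the caller can walk the whole list.
 */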
9865 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9866 {
9867 struct hwrm_wol_filter_qcfg_input req = {0};
9868 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9869 u16 next_handle = 0;
9870 int rc;
9871
9872 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9873 req.port_id = cpu_to_le16(bp->pf.port_id);
9874 req.handle = cpu_to_le16(handle);
9875 mutex_lock(&bp->hwrm_cmd_lock);
9876 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9877 if (!rc) {
9878 next_handle = le16_to_cpu(resp->next_handle);
9879 if (next_handle != 0) {
9880 if (resp->wol_type ==
9881 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9882 bp->wol = 1;
9883 bp->wol_filter_id = resp->wol_filter_id;
9884 }
9885 }
9886 }
9887 mutex_unlock(&bp->hwrm_cmd_lock);
9888 return next_handle;
9889 }
9890
9891 static void bnxt_get_wol_settings(struct bnxt *bp)
9892 {
9893 u16 handle = 0;
9894
9895 bp->wol = 0;
9896 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9897 return;
9898
9899 do {
9900 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9901 } while (handle && handle != 0xffff);
9902 }
9903
9904 #ifdef CONFIG_BNXT_HWMON
9905 static ssize_t bnxt_show_temp(struct device *dev,
9906 struct device_attribute *devattr, char *buf)
9907 {
9908 struct hwrm_temp_monitor_query_input req = {0};
9909 struct hwrm_temp_monitor_query_output *resp;
9910 struct bnxt *bp = dev_get_drvdata(dev);
9911 u32 len = 0;
9912 int rc;
9913
9914 resp = bp->hwrm_cmd_resp_addr;
9915 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9916 mutex_lock(&bp->hwrm_cmd_lock);
9917 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9918 if (!rc)
9919 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegrees Celsius */
9920 mutex_unlock(&bp->hwrm_cmd_lock);
9921 if (rc)
9922 return rc;
9923 return len;
9924 }
9925 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9926
9927 static struct attribute *bnxt_attrs[] = {
9928 &sensor_dev_attr_temp1_input.dev_attr.attr,
9929 NULL
9930 };
9931 ATTRIBUTE_GROUPS(bnxt);
9932
9933 static void bnxt_hwmon_close(struct bnxt *bp)
9934 {
9935 if (bp->hwmon_dev) {
9936 hwmon_device_unregister(bp->hwmon_dev);
9937 bp->hwmon_dev = NULL;
9938 }
9939 }
9940
9941 static void bnxt_hwmon_open(struct bnxt *bp)
9942 {
9943 struct hwrm_temp_monitor_query_input req = {0};
9944 struct pci_dev *pdev = bp->pdev;
9945 int rc;
9946
9947 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9948 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9949 if (rc == -EACCES || rc == -EOPNOTSUPP) {
9950 bnxt_hwmon_close(bp);
9951 return;
9952 }
9953
9954 if (bp->hwmon_dev)
9955 return;
9956
9957 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9958 DRV_MODULE_NAME, bp,
9959 bnxt_groups);
9960 if (IS_ERR(bp->hwmon_dev)) {
9961 bp->hwmon_dev = NULL;
9962 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9963 }
9964 }
9965 #else
9966 static void bnxt_hwmon_close(struct bnxt *bp)
9967 {
9968 }
9969
9970 static void bnxt_hwmon_open(struct bnxt *bp)
9971 {
9972 }
9973 #endif
9974
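/* Check the cached EEE settings against the current autoneg configuration.
 * Returns false (and trims or disables the EEE advertisement) if the PHY
 * needs to be reconfigured.
 */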
9975 static bool bnxt_eee_config_ok(struct bnxt *bp)
9976 {
9977 struct ethtool_eee *eee = &bp->eee;
9978 struct bnxt_link_info *link_info = &bp->link_info;
9979
9980 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
9981 return true;
9982
9983 if (eee->eee_enabled) {
9984 u32 advertising =
9985 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9986
9987 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9988 eee->eee_enabled = 0;
9989 return false;
9990 }
9991 if (eee->advertised & ~advertising) {
9992 eee->advertised = advertising & eee->supported;
9993 return false;
9994 }
9995 }
9996 return true;
9997 }
9998
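/* Compare the requested pause, speed and EEE settings with the current PHY
 * state and issue PORT_PHY_CFG only for the pieces that actually changed.
 */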
9999 static int bnxt_update_phy_setting(struct bnxt *bp)
10000 {
10001 int rc;
10002 bool update_link = false;
10003 bool update_pause = false;
10004 bool update_eee = false;
10005 struct bnxt_link_info *link_info = &bp->link_info;
10006
10007 rc = bnxt_update_link(bp, true);
10008 if (rc) {
10009 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10010 rc);
10011 return rc;
10012 }
10013 if (!BNXT_SINGLE_PF(bp))
10014 return 0;
10015
10016 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10017 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10018 link_info->req_flow_ctrl)
10019 update_pause = true;
10020 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10021 link_info->force_pause_setting != link_info->req_flow_ctrl)
10022 update_pause = true;
10023 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10024 if (BNXT_AUTO_MODE(link_info->auto_mode))
10025 update_link = true;
10026 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10027 link_info->req_link_speed != link_info->force_link_speed)
10028 update_link = true;
10029 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10030 link_info->req_link_speed != link_info->force_pam4_link_speed)
10031 update_link = true;
10032 if (link_info->req_duplex != link_info->duplex_setting)
10033 update_link = true;
10034 } else {
10035 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10036 update_link = true;
10037 if (link_info->advertising != link_info->auto_link_speeds ||
10038 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10039 update_link = true;
10040 }
10041
10042 /* The last close may have shut down the link, so we need to call
10043 * PHY_CFG to bring it back up.
10044 */
10045 if (!bp->link_info.link_up)
10046 update_link = true;
10047
10048 if (!bnxt_eee_config_ok(bp))
10049 update_eee = true;
10050
10051 if (update_link)
10052 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10053 else if (update_pause)
10054 rc = bnxt_hwrm_set_pause(bp);
10055 if (rc) {
10056 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10057 rc);
10058 return rc;
10059 }
10060
10061 return rc;
10062 }
10063
10064 /* Common routine to pre-map certain register block to different GRC window.
10065 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10066 * in the PF and 3 windows in the VF can be customized to map in different
10067 * register blocks.
10068 */
10069 static void bnxt_preset_reg_win(struct bnxt *bp)
10070 {
10071 if (BNXT_PF(bp)) {
10072 /* CAG registers map to GRC window #4 */
10073 writel(BNXT_CAG_REG_BASE,
10074 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10075 }
10076 }
10077
10078 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10079
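/* Attempt to recover from a previously aborted error state by re-running
 * firmware init and re-initializing the interrupt mode.
 */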
10080 static int bnxt_reinit_after_abort(struct bnxt *bp)
10081 {
10082 int rc;
10083
10084 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10085 return -EBUSY;
10086
10087 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10088 return -ENODEV;
10089
10090 rc = bnxt_fw_init_one(bp);
10091 if (!rc) {
10092 bnxt_clear_int_mode(bp);
10093 rc = bnxt_init_int_mode(bp);
10094 if (!rc) {
10095 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10096 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10097 }
10098 }
10099 return rc;
10100 }
10101
10102 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10103 {
10104 int rc = 0;
10105
10106 bnxt_preset_reg_win(bp);
10107 netif_carrier_off(bp->dev);
10108 if (irq_re_init) {
10109 /* Reserve rings now if none were reserved at driver probe. */
10110 rc = bnxt_init_dflt_ring_mode(bp);
10111 if (rc) {
10112 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10113 return rc;
10114 }
10115 }
10116 rc = bnxt_reserve_rings(bp, irq_re_init);
10117 if (rc)
10118 return rc;
10119 if ((bp->flags & BNXT_FLAG_RFS) &&
10120 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10121 /* disable RFS if falling back to INTA */
10122 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10123 bp->flags &= ~BNXT_FLAG_RFS;
10124 }
10125
10126 rc = bnxt_alloc_mem(bp, irq_re_init);
10127 if (rc) {
10128 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10129 goto open_err_free_mem;
10130 }
10131
10132 if (irq_re_init) {
10133 bnxt_init_napi(bp);
10134 rc = bnxt_request_irq(bp);
10135 if (rc) {
10136 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10137 goto open_err_irq;
10138 }
10139 }
10140
10141 rc = bnxt_init_nic(bp, irq_re_init);
10142 if (rc) {
10143 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10144 goto open_err_irq;
10145 }
10146
10147 bnxt_enable_napi(bp);
10148 bnxt_debug_dev_init(bp);
10149
10150 if (link_re_init) {
10151 mutex_lock(&bp->link_lock);
10152 rc = bnxt_update_phy_setting(bp);
10153 mutex_unlock(&bp->link_lock);
10154 if (rc) {
10155 netdev_warn(bp->dev, "failed to update phy settings\n");
10156 if (BNXT_SINGLE_PF(bp)) {
10157 bp->link_info.phy_retry = true;
10158 bp->link_info.phy_retry_expires =
10159 jiffies + 5 * HZ;
10160 }
10161 }
10162 }
10163
10164 if (irq_re_init)
10165 udp_tunnel_nic_reset_ntf(bp->dev);
10166
10167 set_bit(BNXT_STATE_OPEN, &bp->state);
10168 bnxt_enable_int(bp);
10169 /* Enable TX queues */
10170 bnxt_tx_enable(bp);
10171 mod_timer(&bp->timer, jiffies + bp->current_interval);
10172 /* Poll link status and check for SFP+ module status */
10173 bnxt_get_port_module_status(bp);
10174
10175 /* VF-reps may need to be re-opened after the PF is re-opened */
10176 if (BNXT_PF(bp))
10177 bnxt_vf_reps_open(bp);
10178 return 0;
10179
10180 open_err_irq:
10181 bnxt_del_napi(bp);
10182
10183 open_err_free_mem:
10184 bnxt_free_skbs(bp);
10185 bnxt_free_irq(bp);
10186 bnxt_free_mem(bp, true);
10187 return rc;
10188 }
10189
10190 /* rtnl_lock held */
10191 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10192 {
10193 int rc = 0;
10194
10195 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10196 rc = -EIO;
10197 if (!rc)
10198 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10199 if (rc) {
10200 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10201 dev_close(bp->dev);
10202 }
10203 return rc;
10204 }
10205
10206 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10207 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
10208 * self tests.
10209 */
10210 int bnxt_half_open_nic(struct bnxt *bp)
10211 {
10212 int rc = 0;
10213
10214 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10215 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10216 rc = -ENODEV;
10217 goto half_open_err;
10218 }
10219
10220 rc = bnxt_alloc_mem(bp, false);
10221 if (rc) {
10222 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10223 goto half_open_err;
10224 }
10225 rc = bnxt_init_nic(bp, false);
10226 if (rc) {
10227 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10228 goto half_open_err;
10229 }
10230 return 0;
10231
10232 half_open_err:
10233 bnxt_free_skbs(bp);
10234 bnxt_free_mem(bp, false);
10235 dev_close(bp->dev);
10236 return rc;
10237 }
10238
10239 /* rtnl_lock held, this call can only be made after a previous successful
10240 * call to bnxt_half_open_nic().
10241 */
10242 void bnxt_half_close_nic(struct bnxt *bp)
10243 {
10244 bnxt_hwrm_resource_free(bp, false, false);
10245 bnxt_free_skbs(bp);
10246 bnxt_free_mem(bp, false);
10247 }
10248
10249 static void bnxt_reenable_sriov(struct bnxt *bp)
10250 {
10251 if (BNXT_PF(bp)) {
10252 struct bnxt_pf_info *pf = &bp->pf;
10253 int n = pf->active_vfs;
10254
10255 if (n)
10256 bnxt_cfg_hw_sriov(bp, &n, true);
10257 }
10258 }
10259
10260 static int bnxt_open(struct net_device *dev)
10261 {
10262 struct bnxt *bp = netdev_priv(dev);
10263 int rc;
10264
10265 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10266 rc = bnxt_reinit_after_abort(bp);
10267 if (rc) {
10268 if (rc == -EBUSY)
10269 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10270 else
10271 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10272 return -ENODEV;
10273 }
10274 }
10275
10276 rc = bnxt_hwrm_if_change(bp, true);
10277 if (rc)
10278 return rc;
10279
10280 if (bnxt_ptp_init(bp)) {
10281 netdev_warn(dev, "PTP initialization failed.\n");
10282 kfree(bp->ptp_cfg);
10283 bp->ptp_cfg = NULL;
10284 }
10285 rc = __bnxt_open_nic(bp, true, true);
10286 if (rc) {
10287 bnxt_hwrm_if_change(bp, false);
10288 bnxt_ptp_clear(bp);
10289 } else {
10290 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10291 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10292 bnxt_ulp_start(bp, 0);
10293 bnxt_reenable_sriov(bp);
10294 }
10295 }
10296 bnxt_hwmon_open(bp);
10297 }
10298
10299 return rc;
10300 }
10301
10302 static bool bnxt_drv_busy(struct bnxt *bp)
10303 {
10304 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10305 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10306 }
10307
10308 static void bnxt_get_ring_stats(struct bnxt *bp,
10309 struct rtnl_link_stats64 *stats);
10310
10311 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10312 bool link_re_init)
10313 {
10314 /* Close the VF-reps before closing PF */
10315 if (BNXT_PF(bp))
10316 bnxt_vf_reps_close(bp);
10317
10318 /* Change device state to avoid TX queue wake-ups */
10319 bnxt_tx_disable(bp);
10320
10321 clear_bit(BNXT_STATE_OPEN, &bp->state);
10322 smp_mb__after_atomic();
10323 while (bnxt_drv_busy(bp))
10324 msleep(20);
10325
10326 /* Flush rings and disable interrupts */
10327 bnxt_shutdown_nic(bp, irq_re_init);
10328
10329 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10330
10331 bnxt_debug_dev_exit(bp);
10332 bnxt_disable_napi(bp);
10333 del_timer_sync(&bp->timer);
10334 bnxt_free_skbs(bp);
10335
10336 /* Save ring stats before shutdown */
10337 if (bp->bnapi && irq_re_init)
10338 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10339 if (irq_re_init) {
10340 bnxt_free_irq(bp);
10341 bnxt_del_napi(bp);
10342 }
10343 bnxt_free_mem(bp, irq_re_init);
10344 }
10345
10346 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10347 {
10348 int rc = 0;
10349
10350 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10351 /* If we get here, it means firmware reset is in progress
10352 * while we are trying to close. We can safely proceed with
10353 * the close because we are holding rtnl_lock(). Some firmware
10354 * messages may fail as we proceed to close. We set the
10355 * ABORT_ERR flag here so that the FW reset thread will later
10356 * abort when it gets the rtnl_lock() and sees the flag.
10357 */
10358 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10359 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10360 }
10361
10362 #ifdef CONFIG_BNXT_SRIOV
10363 if (bp->sriov_cfg) {
10364 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10365 !bp->sriov_cfg,
10366 BNXT_SRIOV_CFG_WAIT_TMO);
10367 if (rc)
10368 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10369 }
10370 #endif
10371 __bnxt_close_nic(bp, irq_re_init, link_re_init);
10372 return rc;
10373 }
10374
10375 static int bnxt_close(struct net_device *dev)
10376 {
10377 struct bnxt *bp = netdev_priv(dev);
10378
10379 bnxt_ptp_clear(bp);
10380 bnxt_hwmon_close(bp);
10381 bnxt_close_nic(bp, true, true);
10382 bnxt_hwrm_shutdown_link(bp);
10383 bnxt_hwrm_if_change(bp, false);
10384 return 0;
10385 }
10386
10387 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10388 u16 *val)
10389 {
10390 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
10391 struct hwrm_port_phy_mdio_read_input req = {0};
10392 int rc;
10393
10394 if (bp->hwrm_spec_code < 0x10a00)
10395 return -EOPNOTSUPP;
10396
10397 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
10398 req.port_id = cpu_to_le16(bp->pf.port_id);
10399 req.phy_addr = phy_addr;
10400 req.reg_addr = cpu_to_le16(reg & 0x1f);
10401 if (mdio_phy_id_is_c45(phy_addr)) {
10402 req.cl45_mdio = 1;
10403 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10404 req.dev_addr = mdio_phy_id_devad(phy_addr);
10405 req.reg_addr = cpu_to_le16(reg);
10406 }
10407
10408 mutex_lock(&bp->hwrm_cmd_lock);
10409 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10410 if (!rc)
10411 *val = le16_to_cpu(resp->reg_data);
10412 mutex_unlock(&bp->hwrm_cmd_lock);
10413 return rc;
10414 }
10415
10416 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10417 u16 val)
10418 {
10419 struct hwrm_port_phy_mdio_write_input req = {0};
10420
10421 if (bp->hwrm_spec_code < 0x10a00)
10422 return -EOPNOTSUPP;
10423
10424 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
10425 req.port_id = cpu_to_le16(bp->pf.port_id);
10426 req.phy_addr = phy_addr;
10427 req.reg_addr = cpu_to_le16(reg & 0x1f);
10428 if (mdio_phy_id_is_c45(phy_addr)) {
10429 req.cl45_mdio = 1;
10430 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10431 req.dev_addr = mdio_phy_id_devad(phy_addr);
10432 req.reg_addr = cpu_to_le16(reg);
10433 }
10434 req.reg_data = cpu_to_le16(val);
10435
10436 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10437 }
10438
10439 /* rtnl_lock held */
10440 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10441 {
10442 struct mii_ioctl_data *mdio = if_mii(ifr);
10443 struct bnxt *bp = netdev_priv(dev);
10444 int rc;
10445
10446 switch (cmd) {
10447 case SIOCGMIIPHY:
10448 mdio->phy_id = bp->link_info.phy_addr;
10449
10450 fallthrough;
10451 case SIOCGMIIREG: {
10452 u16 mii_regval = 0;
10453
10454 if (!netif_running(dev))
10455 return -EAGAIN;
10456
10457 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10458 &mii_regval);
10459 mdio->val_out = mii_regval;
10460 return rc;
10461 }
10462
10463 case SIOCSMIIREG:
10464 if (!netif_running(dev))
10465 return -EAGAIN;
10466
10467 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10468 mdio->val_in);
10469
10470 case SIOCSHWTSTAMP:
10471 return bnxt_hwtstamp_set(dev, ifr);
10472
10473 case SIOCGHWTSTAMP:
10474 return bnxt_hwtstamp_get(dev, ifr);
10475
10476 default:
10477 /* do nothing */
10478 break;
10479 }
10480 return -EOPNOTSUPP;
10481 }
10482
10483 static void bnxt_get_ring_stats(struct bnxt *bp,
10484 struct rtnl_link_stats64 *stats)
10485 {
10486 int i;
10487
10488 for (i = 0; i < bp->cp_nr_rings; i++) {
10489 struct bnxt_napi *bnapi = bp->bnapi[i];
10490 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10491 u64 *sw = cpr->stats.sw_stats;
10492
10493 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10494 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10495 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10496
10497 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10498 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10499 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10500
10501 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10502 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10503 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10504
10505 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10506 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10507 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10508
10509 stats->rx_missed_errors +=
10510 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10511
10512 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10513
10514 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10515 }
10516 }
10517
10518 static void bnxt_add_prev_stats(struct bnxt *bp,
10519 struct rtnl_link_stats64 *stats)
10520 {
10521 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10522
10523 stats->rx_packets += prev_stats->rx_packets;
10524 stats->tx_packets += prev_stats->tx_packets;
10525 stats->rx_bytes += prev_stats->rx_bytes;
10526 stats->tx_bytes += prev_stats->tx_bytes;
10527 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10528 stats->multicast += prev_stats->multicast;
10529 stats->tx_dropped += prev_stats->tx_dropped;
10530 }
10531
10532 static void
10533 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10534 {
10535 struct bnxt *bp = netdev_priv(dev);
10536
10537 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10538 /* Make sure bnxt_close_nic() sees that we are reading stats before
10539 * we check the BNXT_STATE_OPEN flag.
10540 */
10541 smp_mb__after_atomic();
10542 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10543 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10544 *stats = bp->net_stats_prev;
10545 return;
10546 }
10547
10548 bnxt_get_ring_stats(bp, stats);
10549 bnxt_add_prev_stats(bp, stats);
10550
10551 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10552 u64 *rx = bp->port_stats.sw_stats;
10553 u64 *tx = bp->port_stats.sw_stats +
10554 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10555
10556 stats->rx_crc_errors =
10557 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10558 stats->rx_frame_errors =
10559 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10560 stats->rx_length_errors =
10561 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10562 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10563 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10564 stats->rx_errors =
10565 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10566 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10567 stats->collisions =
10568 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10569 stats->tx_fifo_errors =
10570 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10571 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10572 }
10573 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10574 }
10575
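/* Copy the netdev multicast list into the default vnic, falling back to
 * ALL_MCAST if it exceeds BNXT_MAX_MC_ADDRS. Returns true if the cached
 * list changed.
 */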
10576 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10577 {
10578 struct net_device *dev = bp->dev;
10579 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10580 struct netdev_hw_addr *ha;
10581 u8 *haddr;
10582 int mc_count = 0;
10583 bool update = false;
10584 int off = 0;
10585
10586 netdev_for_each_mc_addr(ha, dev) {
10587 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10588 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10589 vnic->mc_list_count = 0;
10590 return false;
10591 }
10592 haddr = ha->addr;
10593 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10594 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10595 update = true;
10596 }
10597 off += ETH_ALEN;
10598 mc_count++;
10599 }
10600 if (mc_count)
10601 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10602
10603 if (mc_count != vnic->mc_list_count) {
10604 vnic->mc_list_count = mc_count;
10605 update = true;
10606 }
10607 return update;
10608 }
10609
10610 static bool bnxt_uc_list_updated(struct bnxt *bp)
10611 {
10612 struct net_device *dev = bp->dev;
10613 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10614 struct netdev_hw_addr *ha;
10615 int off = 0;
10616
10617 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10618 return true;
10619
10620 netdev_for_each_uc_addr(ha, dev) {
10621 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10622 return true;
10623
10624 off += ETH_ALEN;
10625 }
10626 return false;
10627 }
10628
10629 static void bnxt_set_rx_mode(struct net_device *dev)
10630 {
10631 struct bnxt *bp = netdev_priv(dev);
10632 struct bnxt_vnic_info *vnic;
10633 bool mc_update = false;
10634 bool uc_update;
10635 u32 mask;
10636
10637 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10638 return;
10639
10640 vnic = &bp->vnic_info[0];
10641 mask = vnic->rx_mask;
10642 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10643 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10644 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10645 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10646
10647 if (dev->flags & IFF_PROMISC)
10648 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10649
10650 uc_update = bnxt_uc_list_updated(bp);
10651
10652 if (dev->flags & IFF_BROADCAST)
10653 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10654 if (dev->flags & IFF_ALLMULTI) {
10655 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10656 vnic->mc_list_count = 0;
10657 } else {
10658 mc_update = bnxt_mc_list_updated(bp, &mask);
10659 }
10660
10661 if (mask != vnic->rx_mask || uc_update || mc_update) {
10662 vnic->rx_mask = mask;
10663
10664 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10665 bnxt_queue_sp_work(bp);
10666 }
10667 }
10668
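/* Re-program the unicast L2 filters and the vnic RX mask. Falls back to
 * promiscuous mode if there are too many unicast addresses and to ALL_MCAST
 * if the multicast filters cannot be set.
 */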
10669 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10670 {
10671 struct net_device *dev = bp->dev;
10672 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10673 struct netdev_hw_addr *ha;
10674 int i, off = 0, rc;
10675 bool uc_update;
10676
10677 netif_addr_lock_bh(dev);
10678 uc_update = bnxt_uc_list_updated(bp);
10679 netif_addr_unlock_bh(dev);
10680
10681 if (!uc_update)
10682 goto skip_uc;
10683
10684 mutex_lock(&bp->hwrm_cmd_lock);
10685 for (i = 1; i < vnic->uc_filter_count; i++) {
10686 struct hwrm_cfa_l2_filter_free_input req = {0};
10687
10688 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10689 -1);
10690
10691 req.l2_filter_id = vnic->fw_l2_filter_id[i];
10692
10693 rc = _hwrm_send_message(bp, &req, sizeof(req),
10694 HWRM_CMD_TIMEOUT);
10695 }
10696 mutex_unlock(&bp->hwrm_cmd_lock);
10697
10698 vnic->uc_filter_count = 1;
10699
10700 netif_addr_lock_bh(dev);
10701 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10702 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10703 } else {
10704 netdev_for_each_uc_addr(ha, dev) {
10705 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10706 off += ETH_ALEN;
10707 vnic->uc_filter_count++;
10708 }
10709 }
10710 netif_addr_unlock_bh(dev);
10711
10712 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10713 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10714 if (rc) {
10715 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10716 rc);
10717 vnic->uc_filter_count = i;
10718 return rc;
10719 }
10720 }
10721
10722 skip_uc:
10723 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10724 !bnxt_promisc_ok(bp))
10725 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10726 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10727 if (rc && vnic->mc_list_count) {
10728 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10729 rc);
10730 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10731 vnic->mc_list_count = 0;
10732 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10733 }
10734 if (rc)
10735 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10736 rc);
10737
10738 return rc;
10739 }
10740
10741 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10742 {
10743 #ifdef CONFIG_BNXT_SRIOV
10744 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10745 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10746
10747 /* No minimum rings were provisioned by the PF. Don't
10748 * reserve rings by default when device is down.
10749 */
10750 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10751 return true;
10752
10753 if (!netif_running(bp->dev))
10754 return false;
10755 }
10756 #endif
10757 return true;
10758 }
10759
10760 /* If the chip and firmware support RFS */
10761 static bool bnxt_rfs_supported(struct bnxt *bp)
10762 {
10763 if (bp->flags & BNXT_FLAG_CHIP_P5) {
10764 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10765 return true;
10766 return false;
10767 }
10768 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10769 return true;
10770 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10771 return true;
10772 return false;
10773 }
10774
10775 /* If runtime conditions support RFS */
10776 static bool bnxt_rfs_capable(struct bnxt *bp)
10777 {
10778 #ifdef CONFIG_RFS_ACCEL
10779 int vnics, max_vnics, max_rss_ctxs;
10780
10781 if (bp->flags & BNXT_FLAG_CHIP_P5)
10782 return bnxt_rfs_supported(bp);
10783 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10784 return false;
10785
10786 vnics = 1 + bp->rx_nr_rings;
10787 max_vnics = bnxt_get_max_func_vnics(bp);
10788 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10789
10790 /* RSS contexts not a limiting factor */
10791 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10792 max_rss_ctxs = max_vnics;
10793 if (vnics > max_vnics || vnics > max_rss_ctxs) {
10794 if (bp->rx_nr_rings > 1)
10795 netdev_warn(bp->dev,
10796 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10797 min(max_rss_ctxs - 1, max_vnics - 1));
10798 return false;
10799 }
10800
10801 if (!BNXT_NEW_RM(bp))
10802 return true;
10803
10804 if (vnics == bp->hw_resc.resv_vnics)
10805 return true;
10806
10807 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10808 if (vnics <= bp->hw_resc.resv_vnics)
10809 return true;
10810
10811 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10812 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10813 return false;
10814 #else
10815 return false;
10816 #endif
10817 }
10818
10819 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10820 netdev_features_t features)
10821 {
10822 struct bnxt *bp = netdev_priv(dev);
10823 netdev_features_t vlan_features;
10824
10825 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10826 features &= ~NETIF_F_NTUPLE;
10827
10828 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10829 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10830
10831 if (!(features & NETIF_F_GRO))
10832 features &= ~NETIF_F_GRO_HW;
10833
10834 if (features & NETIF_F_GRO_HW)
10835 features &= ~NETIF_F_LRO;
10836
10837 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10838 * turned on or off together.
10839 */
10840 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10841 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10842 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10843 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10844 else if (vlan_features)
10845 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10846 }
10847 #ifdef CONFIG_BNXT_SRIOV
10848 if (BNXT_VF(bp) && bp->vf.vlan)
10849 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10850 #endif
10851 return features;
10852 }
10853
10854 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10855 {
10856 struct bnxt *bp = netdev_priv(dev);
10857 u32 flags = bp->flags;
10858 u32 changes;
10859 int rc = 0;
10860 bool re_init = false;
10861 bool update_tpa = false;
10862
10863 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10864 if (features & NETIF_F_GRO_HW)
10865 flags |= BNXT_FLAG_GRO;
10866 else if (features & NETIF_F_LRO)
10867 flags |= BNXT_FLAG_LRO;
10868
10869 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10870 flags &= ~BNXT_FLAG_TPA;
10871
10872 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10873 flags |= BNXT_FLAG_STRIP_VLAN;
10874
10875 if (features & NETIF_F_NTUPLE)
10876 flags |= BNXT_FLAG_RFS;
10877
10878 changes = flags ^ bp->flags;
10879 if (changes & BNXT_FLAG_TPA) {
10880 update_tpa = true;
10881 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10882 (flags & BNXT_FLAG_TPA) == 0 ||
10883 (bp->flags & BNXT_FLAG_CHIP_P5))
10884 re_init = true;
10885 }
10886
10887 if (changes & ~BNXT_FLAG_TPA)
10888 re_init = true;
10889
10890 if (flags != bp->flags) {
10891 u32 old_flags = bp->flags;
10892
10893 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10894 bp->flags = flags;
10895 if (update_tpa)
10896 bnxt_set_ring_params(bp);
10897 return rc;
10898 }
10899
10900 if (re_init) {
10901 bnxt_close_nic(bp, false, false);
10902 bp->flags = flags;
10903 if (update_tpa)
10904 bnxt_set_ring_params(bp);
10905
10906 return bnxt_open_nic(bp, false, false);
10907 }
10908 if (update_tpa) {
10909 bp->flags = flags;
10910 rc = bnxt_set_tpa(bp,
10911 (flags & BNXT_FLAG_TPA) ?
10912 true : false);
10913 if (rc)
10914 bp->flags = old_flags;
10915 }
10916 }
10917 return rc;
10918 }
10919
10920 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
10921 u8 **nextp)
10922 {
10923 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
10924 int hdr_count = 0;
10925 u8 *nexthdr;
10926 int start;
10927
10928 /* Check that there are at most 2 IPv6 extension headers, no
10929 * fragment header, and each is <= 64 bytes.
10930 */
10931 start = nw_off + sizeof(*ip6h);
10932 nexthdr = &ip6h->nexthdr;
10933 while (ipv6_ext_hdr(*nexthdr)) {
10934 struct ipv6_opt_hdr *hp;
10935 int hdrlen;
10936
10937 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
10938 *nexthdr == NEXTHDR_FRAGMENT)
10939 return false;
10940 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
10941 skb_headlen(skb), NULL);
10942 if (!hp)
10943 return false;
10944 if (*nexthdr == NEXTHDR_AUTH)
10945 hdrlen = ipv6_authlen(hp);
10946 else
10947 hdrlen = ipv6_optlen(hp);
10948
10949 if (hdrlen > 64)
10950 return false;
10951 nexthdr = &hp->nexthdr;
10952 start += hdrlen;
10953 hdr_count++;
10954 }
10955 if (nextp) {
10956 /* Caller will check inner protocol */
10957 if (skb->encapsulation) {
10958 *nextp = nexthdr;
10959 return true;
10960 }
10961 *nextp = NULL;
10962 }
10963 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
10964 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
10965 }
10966
10967 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
10968 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
10969 {
10970 struct udphdr *uh = udp_hdr(skb);
10971 __be16 udp_port = uh->dest;
10972
10973 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
10974 return false;
10975 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
10976 struct ethhdr *eh = inner_eth_hdr(skb);
10977
10978 switch (eh->h_proto) {
10979 case htons(ETH_P_IP):
10980 return true;
10981 case htons(ETH_P_IPV6):
10982 return bnxt_exthdr_check(bp, skb,
10983 skb_inner_network_offset(skb),
10984 NULL);
10985 }
10986 }
10987 return false;
10988 }
10989
10990 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
10991 {
10992 switch (l4_proto) {
10993 case IPPROTO_UDP:
10994 return bnxt_udp_tunl_check(bp, skb);
10995 case IPPROTO_IPIP:
10996 return true;
10997 case IPPROTO_GRE: {
10998 switch (skb->inner_protocol) {
10999 default:
11000 return false;
11001 case htons(ETH_P_IP):
11002 return true;
11003 case htons(ETH_P_IPV6):
11004 fallthrough;
11005 }
11006 }
11007 case IPPROTO_IPV6:
11008 /* Check ext headers of inner ipv6 */
11009 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11010 NULL);
11011 }
11012 return false;
11013 }
11014
11015 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11016 struct net_device *dev,
11017 netdev_features_t features)
11018 {
11019 struct bnxt *bp = netdev_priv(dev);
11020 u8 *l4_proto;
11021
11022 features = vlan_features_check(skb, features);
11023 switch (vlan_get_protocol(skb)) {
11024 case htons(ETH_P_IP):
11025 if (!skb->encapsulation)
11026 return features;
11027 l4_proto = &ip_hdr(skb)->protocol;
11028 if (bnxt_tunl_check(bp, skb, *l4_proto))
11029 return features;
11030 break;
11031 case htons(ETH_P_IPV6):
11032 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11033 &l4_proto))
11034 break;
11035 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11036 return features;
11037 break;
11038 }
11039 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11040 }
11041
11042 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11043 u32 *reg_buf)
11044 {
11045 struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
11046 struct hwrm_dbg_read_direct_input req = {0};
11047 __le32 *dbg_reg_buf;
11048 dma_addr_t mapping;
11049 int rc, i;
11050
11051 dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
11052 &mapping, GFP_KERNEL);
11053 if (!dbg_reg_buf)
11054 return -ENOMEM;
11055 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
11056 req.host_dest_addr = cpu_to_le64(mapping);
11057 req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11058 req.read_len32 = cpu_to_le32(num_words);
11059 mutex_lock(&bp->hwrm_cmd_lock);
11060 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11061 if (rc || resp->error_code) {
11062 rc = -EIO;
11063 goto dbg_rd_reg_exit;
11064 }
11065 for (i = 0; i < num_words; i++)
11066 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11067
11068 dbg_rd_reg_exit:
11069 mutex_unlock(&bp->hwrm_cmd_lock);
11070 dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
11071 return rc;
11072 }
11073
11074 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11075 u32 ring_id, u32 *prod, u32 *cons)
11076 {
11077 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
11078 struct hwrm_dbg_ring_info_get_input req = {0};
11079 int rc;
11080
11081 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
11082 req.ring_type = ring_type;
11083 req.fw_ring_id = cpu_to_le32(ring_id);
11084 mutex_lock(&bp->hwrm_cmd_lock);
11085 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11086 if (!rc) {
11087 *prod = le32_to_cpu(resp->producer_index);
11088 *cons = le32_to_cpu(resp->consumer_index);
11089 }
11090 mutex_unlock(&bp->hwrm_cmd_lock);
11091 return rc;
11092 }
11093
11094 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11095 {
11096 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11097 int i = bnapi->index;
11098
11099 if (!txr)
11100 return;
11101
11102 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11103 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11104 txr->tx_cons);
11105 }
11106
11107 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11108 {
11109 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11110 int i = bnapi->index;
11111
11112 if (!rxr)
11113 return;
11114
11115 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11116 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11117 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11118 rxr->rx_sw_agg_prod);
11119 }
11120
11121 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11122 {
11123 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11124 int i = bnapi->index;
11125
11126 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11127 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11128 }
11129
11130 static void bnxt_dbg_dump_states(struct bnxt *bp)
11131 {
11132 int i;
11133 struct bnxt_napi *bnapi;
11134
11135 for (i = 0; i < bp->cp_nr_rings; i++) {
11136 bnapi = bp->bnapi[i];
11137 if (netif_msg_drv(bp)) {
11138 bnxt_dump_tx_sw_state(bnapi);
11139 bnxt_dump_rx_sw_state(bnapi);
11140 bnxt_dump_cp_sw_state(bnapi);
11141 }
11142 }
11143 }
11144
11145 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11146 {
11147 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11148 struct hwrm_ring_reset_input req = {0};
11149 struct bnxt_napi *bnapi = rxr->bnapi;
11150 struct bnxt_cp_ring_info *cpr;
11151 u16 cp_ring_id;
11152
11153 cpr = &bnapi->cp_ring;
11154 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11155 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
11156 req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11157 req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11158 return hwrm_send_message_silent(bp, &req, sizeof(req),
11159 HWRM_CMD_TIMEOUT);
11160 }
11161
11162 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11163 {
11164 if (!silent)
11165 bnxt_dbg_dump_states(bp);
11166 if (netif_running(bp->dev)) {
11167 int rc;
11168
11169 if (silent) {
11170 bnxt_close_nic(bp, false, false);
11171 bnxt_open_nic(bp, false, false);
11172 } else {
11173 bnxt_ulp_stop(bp);
11174 bnxt_close_nic(bp, true, false);
11175 rc = bnxt_open_nic(bp, true, false);
11176 bnxt_ulp_start(bp, rc);
11177 }
11178 }
11179 }
11180
11181 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11182 {
11183 struct bnxt *bp = netdev_priv(dev);
11184
11185 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11186 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11187 bnxt_queue_sp_work(bp);
11188 }
11189
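/* Firmware heartbeat check, driven from bnxt_timer() and throttled by
 * tmr_counter.  If the heartbeat register has not advanced, or the reset
 * counter has changed unexpectedly, schedule a firmware exception event.
 */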
11190 static void bnxt_fw_health_check(struct bnxt *bp)
11191 {
11192 struct bnxt_fw_health *fw_health = bp->fw_health;
11193 u32 val;
11194
11195 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11196 return;
11197
11198 if (fw_health->tmr_counter) {
11199 fw_health->tmr_counter--;
11200 return;
11201 }
11202
11203 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11204 if (val == fw_health->last_fw_heartbeat)
11205 goto fw_reset;
11206
11207 fw_health->last_fw_heartbeat = val;
11208
11209 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11210 if (val != fw_health->last_fw_reset_cnt)
11211 goto fw_reset;
11212
11213 fw_health->tmr_counter = fw_health->tmr_multiplier;
11214 return;
11215
11216 fw_reset:
11217 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11218 bnxt_queue_sp_work(bp);
11219 }
11220
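/* Periodic timer (bp->current_interval).  Schedules slow-path work such as
 * firmware health checks, statistics updates, TC flower stats, ntuple filter
 * maintenance and PHY setting retries, then re-arms itself while the NIC is
 * open.
 */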
11221 static void bnxt_timer(struct timer_list *t)
11222 {
11223 struct bnxt *bp = from_timer(bp, t, timer);
11224 struct net_device *dev = bp->dev;
11225
11226 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11227 return;
11228
11229 if (atomic_read(&bp->intr_sem) != 0)
11230 goto bnxt_restart_timer;
11231
11232 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11233 bnxt_fw_health_check(bp);
11234
11235 if (bp->link_info.link_up && bp->stats_coal_ticks) {
11236 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11237 bnxt_queue_sp_work(bp);
11238 }
11239
11240 if (bnxt_tc_flower_enabled(bp)) {
11241 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11242 bnxt_queue_sp_work(bp);
11243 }
11244
11245 #ifdef CONFIG_RFS_ACCEL
11246 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11247 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11248 bnxt_queue_sp_work(bp);
11249 }
11250 #endif /*CONFIG_RFS_ACCEL*/
11251
11252 if (bp->link_info.phy_retry) {
11253 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11254 bp->link_info.phy_retry = false;
11255 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11256 } else {
11257 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11258 bnxt_queue_sp_work(bp);
11259 }
11260 }
11261
11262 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11263 netif_carrier_ok(dev)) {
11264 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11265 bnxt_queue_sp_work(bp);
11266 }
11267 bnxt_restart_timer:
11268 mod_timer(&bp->timer, jiffies + bp->current_interval);
11269 }
11270
11271 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11272 {
11273 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11274 * set. If the device is being closed, bnxt_close() may be holding the
11275 * rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11276 * must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
11277 */
11278 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11279 rtnl_lock();
11280 }
11281
11282 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11283 {
11284 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11285 rtnl_unlock();
11286 }
11287
11288 /* Only called from bnxt_sp_task() */
11289 static void bnxt_reset(struct bnxt *bp, bool silent)
11290 {
11291 bnxt_rtnl_lock_sp(bp);
11292 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11293 bnxt_reset_task(bp, silent);
11294 bnxt_rtnl_unlock_sp(bp);
11295 }
11296
11297 /* Only called from bnxt_sp_task() */
11298 static void bnxt_rx_ring_reset(struct bnxt *bp)
11299 {
11300 int i;
11301
11302 bnxt_rtnl_lock_sp(bp);
11303 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11304 bnxt_rtnl_unlock_sp(bp);
11305 return;
11306 }
11307 /* Disable and flush TPA before resetting the RX ring */
11308 if (bp->flags & BNXT_FLAG_TPA)
11309 bnxt_set_tpa(bp, false);
11310 for (i = 0; i < bp->rx_nr_rings; i++) {
11311 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11312 struct bnxt_cp_ring_info *cpr;
11313 int rc;
11314
11315 if (!rxr->bnapi->in_reset)
11316 continue;
11317
11318 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11319 if (rc) {
11320 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11321 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11322 else
11323 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11324 rc);
11325 bnxt_reset_task(bp, true);
11326 break;
11327 }
11328 bnxt_free_one_rx_ring_skbs(bp, i);
11329 rxr->rx_prod = 0;
11330 rxr->rx_agg_prod = 0;
11331 rxr->rx_sw_agg_prod = 0;
11332 rxr->rx_next_cons = 0;
11333 rxr->bnapi->in_reset = false;
11334 bnxt_alloc_one_rx_ring(bp, i);
11335 cpr = &rxr->bnapi->cp_ring;
11336 cpr->sw_stats.rx.rx_resets++;
11337 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11338 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11339 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11340 }
11341 if (bp->flags & BNXT_FLAG_TPA)
11342 bnxt_set_tpa(bp, true);
11343 bnxt_rtnl_unlock_sp(bp);
11344 }
11345
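/* Quiesce and tear down the NIC ahead of a firmware reset: stop ULPs, close
 * the netdev, free VF reps, unregister the driver from firmware, disable the
 * PCI device and release context memory.
 */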
11346 static void bnxt_fw_reset_close(struct bnxt *bp)
11347 {
11348 bnxt_ulp_stop(bp);
11349 /* When firmware is in fatal state, quiesce device and disable
11350 * bus master to prevent any potential bad DMAs before freeing
11351 * kernel memory.
11352 */
11353 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11354 u16 val = 0;
11355
11356 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11357 if (val == 0xffff)
11358 bp->fw_reset_min_dsecs = 0;
11359 bnxt_tx_disable(bp);
11360 bnxt_disable_napi(bp);
11361 bnxt_disable_int_sync(bp);
11362 bnxt_free_irq(bp);
11363 bnxt_clear_int_mode(bp);
11364 pci_disable_device(bp->pdev);
11365 }
11366 bnxt_ptp_clear(bp);
11367 __bnxt_close_nic(bp, true, false);
11368 bnxt_vf_reps_free(bp);
11369 bnxt_clear_int_mode(bp);
11370 bnxt_hwrm_func_drv_unrgtr(bp);
11371 if (pci_is_enabled(bp->pdev))
11372 pci_disable_device(bp->pdev);
11373 bnxt_free_ctx_mem(bp);
11374 kfree(bp->ctx);
11375 bp->ctx = NULL;
11376 }
11377
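/* Firmware is considered OK only if the heartbeat register has advanced and
 * the reset counter has changed since the last recorded values, which
 * suggests firmware has gone through a reset and is running again.
 */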
11378 static bool is_bnxt_fw_ok(struct bnxt *bp)
11379 {
11380 struct bnxt_fw_health *fw_health = bp->fw_health;
11381 bool no_heartbeat = false, has_reset = false;
11382 u32 val;
11383
11384 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11385 if (val == fw_health->last_fw_heartbeat)
11386 no_heartbeat = true;
11387
11388 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11389 if (val != fw_health->last_fw_reset_cnt)
11390 has_reset = true;
11391
11392 if (!no_heartbeat && has_reset)
11393 return true;
11394
11395 return false;
11396 }
11397
11398 /* rtnl_lock is acquired before calling this function */
11399 static void bnxt_force_fw_reset(struct bnxt *bp)
11400 {
11401 struct bnxt_fw_health *fw_health = bp->fw_health;
11402 u32 wait_dsecs;
11403
11404 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11405 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11406 return;
11407
11408 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11409 bnxt_fw_reset_close(bp);
11410 wait_dsecs = fw_health->master_func_wait_dsecs;
11411 if (fw_health->master) {
11412 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11413 wait_dsecs = 0;
11414 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11415 } else {
11416 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11417 wait_dsecs = fw_health->normal_func_wait_dsecs;
11418 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11419 }
11420
11421 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11422 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11423 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11424 }
11425
11426 void bnxt_fw_exception(struct bnxt *bp)
11427 {
11428 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11429 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11430 bnxt_rtnl_lock_sp(bp);
11431 bnxt_force_fw_reset(bp);
11432 bnxt_rtnl_unlock_sp(bp);
11433 }
11434
11435 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11436 * < 0 on error.
11437 */
11438 static int bnxt_get_registered_vfs(struct bnxt *bp)
11439 {
11440 #ifdef CONFIG_BNXT_SRIOV
11441 int rc;
11442
11443 if (!BNXT_PF(bp))
11444 return 0;
11445
11446 rc = bnxt_hwrm_func_qcfg(bp);
11447 if (rc) {
11448 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11449 return rc;
11450 }
11451 if (bp->pf.registered_vfs)
11452 return bp->pf.registered_vfs;
11453 if (bp->sriov_cfg)
11454 return 1;
11455 #endif
11456 return 0;
11457 }
11458
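/* Begin a driver-initiated firmware reset.  If VFs are still registered,
 * poll for them to unload first (BNXT_FW_RESET_STATE_POLL_VF); otherwise
 * close the NIC and hand the rest of the sequence to bnxt_fw_reset_task().
 */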
11459 void bnxt_fw_reset(struct bnxt *bp)
11460 {
11461 bnxt_rtnl_lock_sp(bp);
11462 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11463 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11464 int n = 0, tmo;
11465
11466 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11467 if (bp->pf.active_vfs &&
11468 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11469 n = bnxt_get_registered_vfs(bp);
11470 if (n < 0) {
11471 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11472 n);
11473 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11474 dev_close(bp->dev);
11475 goto fw_reset_exit;
11476 } else if (n > 0) {
11477 u16 vf_tmo_dsecs = n * 10;
11478
11479 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11480 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11481 bp->fw_reset_state =
11482 BNXT_FW_RESET_STATE_POLL_VF;
11483 bnxt_queue_fw_reset_work(bp, HZ / 10);
11484 goto fw_reset_exit;
11485 }
11486 bnxt_fw_reset_close(bp);
11487 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11488 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11489 tmo = HZ / 10;
11490 } else {
11491 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11492 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11493 }
11494 bnxt_queue_fw_reset_work(bp, tmo);
11495 }
11496 fw_reset_exit:
11497 bnxt_rtnl_unlock_sp(bp);
11498 }
11499
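/* P5 chips only: look for completion rings that have work pending but whose
 * consumer index has not moved since the last check, which indicates a
 * missed doorbell/interrupt.  Query the ring state from firmware for
 * debugging and count the event in sw_stats.
 */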
11500 static void bnxt_chk_missed_irq(struct bnxt *bp)
11501 {
11502 int i;
11503
11504 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11505 return;
11506
11507 for (i = 0; i < bp->cp_nr_rings; i++) {
11508 struct bnxt_napi *bnapi = bp->bnapi[i];
11509 struct bnxt_cp_ring_info *cpr;
11510 u32 fw_ring_id;
11511 int j;
11512
11513 if (!bnapi)
11514 continue;
11515
11516 cpr = &bnapi->cp_ring;
11517 for (j = 0; j < 2; j++) {
11518 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11519 u32 val[2];
11520
11521 if (!cpr2 || cpr2->has_more_work ||
11522 !bnxt_has_work(bp, cpr2))
11523 continue;
11524
11525 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11526 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11527 continue;
11528 }
11529 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11530 bnxt_dbg_hwrm_ring_info_get(bp,
11531 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11532 fw_ring_id, &val[0], &val[1]);
11533 cpr->sw_stats.cmn.missed_irqs++;
11534 }
11535 }
11536 }
11537
11538 static void bnxt_cfg_ntp_filters(struct bnxt *);
11539
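/* Derive the requested link settings (speed, duplex, pause, signal mode)
 * reported through ethtool from the current firmware PHY configuration,
 * honoring autoneg when the PHY is in an auto mode.
 */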
11540 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11541 {
11542 struct bnxt_link_info *link_info = &bp->link_info;
11543
11544 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11545 link_info->autoneg = BNXT_AUTONEG_SPEED;
11546 if (bp->hwrm_spec_code >= 0x10201) {
11547 if (link_info->auto_pause_setting &
11548 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11549 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11550 } else {
11551 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11552 }
11553 link_info->advertising = link_info->auto_link_speeds;
11554 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11555 } else {
11556 link_info->req_link_speed = link_info->force_link_speed;
11557 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11558 if (link_info->force_pam4_link_speed) {
11559 link_info->req_link_speed =
11560 link_info->force_pam4_link_speed;
11561 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11562 }
11563 link_info->req_duplex = link_info->duplex_setting;
11564 }
11565 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11566 link_info->req_flow_ctrl =
11567 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11568 else
11569 link_info->req_flow_ctrl = link_info->force_pause_setting;
11570 }
11571
11572 static void bnxt_fw_echo_reply(struct bnxt *bp)
11573 {
11574 struct bnxt_fw_health *fw_health = bp->fw_health;
11575 struct hwrm_func_echo_response_input req = {0};
11576
11577 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
11578 req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11579 req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11580 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11581 }
11582
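/* Slow-path work handler.  Each BNXT_*_SP_EVENT bit set by the interrupt
 * path or timer is tested, cleared and serviced here.  Reset events are
 * handled last because bnxt_rtnl_lock_sp() temporarily drops
 * BNXT_STATE_IN_SP_TASK.
 */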
11583 static void bnxt_sp_task(struct work_struct *work)
11584 {
11585 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11586
11587 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11588 smp_mb__after_atomic();
11589 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11590 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11591 return;
11592 }
11593
11594 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11595 bnxt_cfg_rx_mode(bp);
11596
11597 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11598 bnxt_cfg_ntp_filters(bp);
11599 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11600 bnxt_hwrm_exec_fwd_req(bp);
11601 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11602 bnxt_hwrm_port_qstats(bp, 0);
11603 bnxt_hwrm_port_qstats_ext(bp, 0);
11604 bnxt_accumulate_all_stats(bp);
11605 }
11606
11607 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11608 int rc;
11609
11610 mutex_lock(&bp->link_lock);
11611 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11612 &bp->sp_event))
11613 bnxt_hwrm_phy_qcaps(bp);
11614
11615 rc = bnxt_update_link(bp, true);
11616 if (rc)
11617 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11618 rc);
11619
11620 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11621 &bp->sp_event))
11622 bnxt_init_ethtool_link_settings(bp);
11623 mutex_unlock(&bp->link_lock);
11624 }
11625 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11626 int rc;
11627
11628 mutex_lock(&bp->link_lock);
11629 rc = bnxt_update_phy_setting(bp);
11630 mutex_unlock(&bp->link_lock);
11631 if (rc) {
11632 netdev_warn(bp->dev, "update phy settings retry failed\n");
11633 } else {
11634 bp->link_info.phy_retry = false;
11635 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11636 }
11637 }
11638 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11639 mutex_lock(&bp->link_lock);
11640 bnxt_get_port_module_status(bp);
11641 mutex_unlock(&bp->link_lock);
11642 }
11643
11644 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11645 bnxt_tc_flow_stats_work(bp);
11646
11647 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11648 bnxt_chk_missed_irq(bp);
11649
11650 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11651 bnxt_fw_echo_reply(bp);
11652
11653 /* The functions below temporarily clear BNXT_STATE_IN_SP_TASK via
11654 * bnxt_rtnl_lock_sp(), so they must be the last ones called before exiting.
11655 */
11656 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11657 bnxt_reset(bp, false);
11658
11659 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11660 bnxt_reset(bp, true);
11661
11662 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11663 bnxt_rx_ring_reset(bp);
11664
11665 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11666 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11667
11668 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11669 if (!is_bnxt_fw_ok(bp))
11670 bnxt_devlink_health_report(bp,
11671 BNXT_FW_EXCEPTION_SP_EVENT);
11672 }
11673
11674 smp_mb__before_atomic();
11675 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11676 }
11677
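/* Check whether the requested TX/RX/TC/XDP ring configuration fits the
 * resources granted by firmware, accounting for the completion rings, stat
 * contexts and VNICs the configuration implies.
 */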
11678 /* Under rtnl_lock */
11679 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11680 int tx_xdp)
11681 {
11682 int max_rx, max_tx, tx_sets = 1;
11683 int tx_rings_needed, stats;
11684 int rx_rings = rx;
11685 int cp, vnics, rc;
11686
11687 if (tcs)
11688 tx_sets = tcs;
11689
11690 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11691 if (rc)
11692 return rc;
11693
11694 if (max_rx < rx)
11695 return -ENOMEM;
11696
11697 tx_rings_needed = tx * tx_sets + tx_xdp;
11698 if (max_tx < tx_rings_needed)
11699 return -ENOMEM;
11700
11701 vnics = 1;
11702 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11703 vnics += rx_rings;
11704
11705 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11706 rx_rings <<= 1;
11707 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11708 stats = cp;
11709 if (BNXT_NEW_RM(bp)) {
11710 cp += bnxt_get_ulp_msix_num(bp);
11711 stats += bnxt_get_ulp_stat_ctxs(bp);
11712 }
11713 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11714 stats, vnics);
11715 }
11716
11717 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11718 {
11719 if (bp->bar2) {
11720 pci_iounmap(pdev, bp->bar2);
11721 bp->bar2 = NULL;
11722 }
11723
11724 if (bp->bar1) {
11725 pci_iounmap(pdev, bp->bar1);
11726 bp->bar1 = NULL;
11727 }
11728
11729 if (bp->bar0) {
11730 pci_iounmap(pdev, bp->bar0);
11731 bp->bar0 = NULL;
11732 }
11733 }
11734
11735 static void bnxt_cleanup_pci(struct bnxt *bp)
11736 {
11737 bnxt_unmap_bars(bp, bp->pdev);
11738 pci_release_regions(bp->pdev);
11739 if (pci_is_enabled(bp->pdev))
11740 pci_disable_device(bp->pdev);
11741 }
11742
11743 static void bnxt_init_dflt_coal(struct bnxt *bp)
11744 {
11745 struct bnxt_coal *coal;
11746
11747 /* Tick values are in microseconds.
11748 * 1 coal_buf x bufs_per_record = 1 completion record.
11749 */
11750 coal = &bp->rx_coal;
11751 coal->coal_ticks = 10;
11752 coal->coal_bufs = 30;
11753 coal->coal_ticks_irq = 1;
11754 coal->coal_bufs_irq = 2;
11755 coal->idle_thresh = 50;
11756 coal->bufs_per_record = 2;
11757 coal->budget = 64; /* NAPI budget */
11758
11759 coal = &bp->tx_coal;
11760 coal->coal_ticks = 28;
11761 coal->coal_bufs = 30;
11762 coal->coal_ticks_irq = 2;
11763 coal->coal_bufs_irq = 2;
11764 coal->bufs_per_record = 1;
11765
11766 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11767 }
11768
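/* Firmware init, phase 1: query the HWRM interface version (attempting
 * recovery if firmware is unresponsive), set up the short command and Kong
 * channels as needed, reset the function and set the firmware time.
 */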
11769 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11770 {
11771 int rc;
11772
11773 bp->fw_cap = 0;
11774 rc = bnxt_hwrm_ver_get(bp);
11775 bnxt_try_map_fw_health_reg(bp);
11776 if (rc) {
11777 rc = bnxt_try_recover_fw(bp);
11778 if (rc)
11779 return rc;
11780 rc = bnxt_hwrm_ver_get(bp);
11781 if (rc)
11782 return rc;
11783 }
11784
11785 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
11786 rc = bnxt_alloc_kong_hwrm_resources(bp);
11787 if (rc)
11788 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
11789 }
11790
11791 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
11792 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
11793 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
11794 if (rc)
11795 return rc;
11796 }
11797 bnxt_nvm_cfg_ver_get(bp);
11798
11799 rc = bnxt_hwrm_func_reset(bp);
11800 if (rc)
11801 return -ENODEV;
11802
11803 bnxt_hwrm_fw_set_time(bp);
11804 return 0;
11805 }
11806
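/* Firmware init, phase 2: query function, flow management and error
 * recovery capabilities, then register the driver with firmware and
 * initialize ethtool/DCB state.
 */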
11807 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11808 {
11809 int rc;
11810
11811 /* Get the maximum capabilities for this function */
11812 rc = bnxt_hwrm_func_qcaps(bp);
11813 if (rc) {
11814 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11815 rc);
11816 return -ENODEV;
11817 }
11818
11819 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11820 if (rc)
11821 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11822 rc);
11823
11824 if (bnxt_alloc_fw_health(bp)) {
11825 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11826 } else {
11827 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11828 if (rc)
11829 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11830 rc);
11831 }
11832
11833 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11834 if (rc)
11835 return -ENODEV;
11836
11837 bnxt_hwrm_func_qcfg(bp);
11838 bnxt_hwrm_vnic_qcaps(bp);
11839 bnxt_hwrm_port_led_qcaps(bp);
11840 bnxt_ethtool_init(bp);
11841 bnxt_dcb_init(bp);
11842 return 0;
11843 }
11844
11845 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11846 {
11847 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11848 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11849 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11850 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11851 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11852 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11853 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11854 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11855 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11856 }
11857 }
11858
11859 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11860 {
11861 struct net_device *dev = bp->dev;
11862
11863 dev->hw_features &= ~NETIF_F_NTUPLE;
11864 dev->features &= ~NETIF_F_NTUPLE;
11865 bp->flags &= ~BNXT_FLAG_RFS;
11866 if (bnxt_rfs_supported(bp)) {
11867 dev->hw_features |= NETIF_F_NTUPLE;
11868 if (bnxt_rfs_capable(bp)) {
11869 bp->flags |= BNXT_FLAG_RFS;
11870 dev->features |= NETIF_F_NTUPLE;
11871 }
11872 }
11873 }
11874
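/* Firmware init, phase 3: apply the default RSS hash and RFS settings,
 * configure Wake-on-LAN and the cache line size, and query coalescing
 * capabilities.
 */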
11875 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11876 {
11877 struct pci_dev *pdev = bp->pdev;
11878
11879 bnxt_set_dflt_rss_hash_type(bp);
11880 bnxt_set_dflt_rfs(bp);
11881
11882 bnxt_get_wol_settings(bp);
11883 if (bp->flags & BNXT_FLAG_WOL_CAP)
11884 device_set_wakeup_enable(&pdev->dev, bp->wol);
11885 else
11886 device_set_wakeup_capable(&pdev->dev, false);
11887
11888 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11889 bnxt_hwrm_coal_params_qcaps(bp);
11890 }
11891
11892 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11893
11894 static int bnxt_fw_init_one(struct bnxt *bp)
11895 {
11896 int rc;
11897
11898 rc = bnxt_fw_init_one_p1(bp);
11899 if (rc) {
11900 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11901 return rc;
11902 }
11903 rc = bnxt_fw_init_one_p2(bp);
11904 if (rc) {
11905 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11906 return rc;
11907 }
11908 rc = bnxt_probe_phy(bp, false);
11909 if (rc)
11910 return rc;
11911 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11912 if (rc)
11913 return rc;
11914
11915 /* In case fw capabilities have changed, destroy the reporters that are no
11916 * longer needed and create the ones that are now supported.
11917 */
11918 bnxt_dl_fw_reporters_destroy(bp, false);
11919 bnxt_dl_fw_reporters_create(bp);
11920 bnxt_fw_init_one_p3(bp);
11921 return 0;
11922 }
11923
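/* Execute one step of the firmware-supplied reset sequence: write the value
 * to the register described by its type (PCI config, GRC window, BAR0 or
 * BAR1), then apply the optional per-step delay.
 */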
11924 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11925 {
11926 struct bnxt_fw_health *fw_health = bp->fw_health;
11927 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11928 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11929 u32 reg_type, reg_off, delay_msecs;
11930
11931 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11932 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11933 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11934 switch (reg_type) {
11935 case BNXT_FW_HEALTH_REG_TYPE_CFG:
11936 pci_write_config_dword(bp->pdev, reg_off, val);
11937 break;
11938 case BNXT_FW_HEALTH_REG_TYPE_GRC:
11939 writel(reg_off & BNXT_GRC_BASE_MASK,
11940 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11941 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
11942 fallthrough;
11943 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
11944 writel(val, bp->bar0 + reg_off);
11945 break;
11946 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
11947 writel(val, bp->bar1 + reg_off);
11948 break;
11949 }
11950 if (delay_msecs) {
11951 pci_read_config_dword(bp->pdev, 0, &val);
11952 msleep(delay_msecs);
11953 }
11954 }
11955
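/* Reset the firmware using whichever mechanism is available: an OP-TEE
 * assisted reload, the host-driven register write sequence, or an
 * HWRM_FW_RESET request handled by the co-processor.
 */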
11956 static void bnxt_reset_all(struct bnxt *bp)
11957 {
11958 struct bnxt_fw_health *fw_health = bp->fw_health;
11959 int i, rc;
11960
11961 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11962 bnxt_fw_reset_via_optee(bp);
11963 bp->fw_reset_timestamp = jiffies;
11964 return;
11965 }
11966
11967 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
11968 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
11969 bnxt_fw_reset_writel(bp, i);
11970 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
11971 struct hwrm_fw_reset_input req = {0};
11972
11973 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
11974 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
11975 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
11976 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
11977 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
11978 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11979 if (rc != -ENODEV)
11980 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
11981 }
11982 bp->fw_reset_timestamp = jiffies;
11983 }
11984
11985 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
11986 {
11987 return time_after(jiffies, bp->fw_reset_timestamp +
11988 (bp->fw_reset_max_dsecs * HZ / 10));
11989 }
11990
11991 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
11992 {
11993 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11994 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
11995 bnxt_ulp_start(bp, rc);
11996 bnxt_dl_health_status_update(bp, false);
11997 }
11998 bp->fw_reset_state = 0;
11999 dev_close(bp->dev);
12000 }
12001
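/* Delayed-work state machine that carries out a firmware reset.  The normal
 * flow is POLL_VF -> (POLL_FW_DOWN/RESET_FW) -> ENABLE_DEV -> POLL_FW ->
 * OPENING; each state either re-queues itself until its condition is met or
 * falls through to the next one.
 */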
12002 static void bnxt_fw_reset_task(struct work_struct *work)
12003 {
12004 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12005 int rc = 0;
12006
12007 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12008 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12009 return;
12010 }
12011
12012 switch (bp->fw_reset_state) {
12013 case BNXT_FW_RESET_STATE_POLL_VF: {
12014 int n = bnxt_get_registered_vfs(bp);
12015 int tmo;
12016
12017 if (n < 0) {
12018 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12019 n, jiffies_to_msecs(jiffies -
12020 bp->fw_reset_timestamp));
12021 goto fw_reset_abort;
12022 } else if (n > 0) {
12023 if (bnxt_fw_reset_timeout(bp)) {
12024 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12025 bp->fw_reset_state = 0;
12026 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12027 n);
12028 return;
12029 }
12030 bnxt_queue_fw_reset_work(bp, HZ / 10);
12031 return;
12032 }
12033 bp->fw_reset_timestamp = jiffies;
12034 rtnl_lock();
12035 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12036 bnxt_fw_reset_abort(bp, rc);
12037 rtnl_unlock();
12038 return;
12039 }
12040 bnxt_fw_reset_close(bp);
12041 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12042 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12043 tmo = HZ / 10;
12044 } else {
12045 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12046 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12047 }
12048 rtnl_unlock();
12049 bnxt_queue_fw_reset_work(bp, tmo);
12050 return;
12051 }
12052 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12053 u32 val;
12054
12055 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12056 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12057 !bnxt_fw_reset_timeout(bp)) {
12058 bnxt_queue_fw_reset_work(bp, HZ / 5);
12059 return;
12060 }
12061
12062 if (!bp->fw_health->master) {
12063 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12064
12065 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12066 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12067 return;
12068 }
12069 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12070 }
12071 fallthrough;
12072 case BNXT_FW_RESET_STATE_RESET_FW:
12073 bnxt_reset_all(bp);
12074 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12075 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12076 return;
12077 case BNXT_FW_RESET_STATE_ENABLE_DEV:
12078 bnxt_inv_fw_health_reg(bp);
12079 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12080 !bp->fw_reset_min_dsecs) {
12081 u16 val;
12082
12083 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12084 if (val == 0xffff) {
12085 if (bnxt_fw_reset_timeout(bp)) {
12086 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12087 rc = -ETIMEDOUT;
12088 goto fw_reset_abort;
12089 }
12090 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12091 return;
12092 }
12093 }
12094 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12095 if (pci_enable_device(bp->pdev)) {
12096 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12097 rc = -ENODEV;
12098 goto fw_reset_abort;
12099 }
12100 pci_set_master(bp->pdev);
12101 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12102 fallthrough;
12103 case BNXT_FW_RESET_STATE_POLL_FW:
12104 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12105 rc = __bnxt_hwrm_ver_get(bp, true);
12106 if (rc) {
12107 if (bnxt_fw_reset_timeout(bp)) {
12108 netdev_err(bp->dev, "Firmware reset aborted\n");
12109 goto fw_reset_abort_status;
12110 }
12111 bnxt_queue_fw_reset_work(bp, HZ / 5);
12112 return;
12113 }
12114 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12115 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12116 fallthrough;
12117 case BNXT_FW_RESET_STATE_OPENING:
12118 while (!rtnl_trylock()) {
12119 bnxt_queue_fw_reset_work(bp, HZ / 10);
12120 return;
12121 }
12122 rc = bnxt_open(bp->dev);
12123 if (rc) {
12124 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12125 bnxt_fw_reset_abort(bp, rc);
12126 rtnl_unlock();
12127 return;
12128 }
12129
12130 bp->fw_reset_state = 0;
12131 /* Make sure fw_reset_state is 0 before clearing the flag */
12132 smp_mb__before_atomic();
12133 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12134 bnxt_ulp_start(bp, 0);
12135 bnxt_reenable_sriov(bp);
12136 bnxt_vf_reps_alloc(bp);
12137 bnxt_vf_reps_open(bp);
12138 bnxt_dl_health_recovery_done(bp);
12139 bnxt_dl_health_status_update(bp, true);
12140 rtnl_unlock();
12141 break;
12142 }
12143 return;
12144
12145 fw_reset_abort_status:
12146 if (bp->fw_health->status_reliable ||
12147 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12148 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12149
12150 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12151 }
12152 fw_reset_abort:
12153 rtnl_lock();
12154 bnxt_fw_reset_abort(bp, rc);
12155 rtnl_unlock();
12156 }
12157
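/* One-time PCI setup: enable the device, claim its regions, set the DMA
 * mask, map BAR0 and BAR4, and initialize the slow-path work items, locks,
 * default coalescing and the periodic timer.  The doorbell BAR is mapped
 * later, once its size is known.
 */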
12158 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12159 {
12160 int rc;
12161 struct bnxt *bp = netdev_priv(dev);
12162
12163 SET_NETDEV_DEV(dev, &pdev->dev);
12164
12165 /* enable device (incl. PCI PM wakeup) and bus-mastering */
12166 rc = pci_enable_device(pdev);
12167 if (rc) {
12168 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12169 goto init_err;
12170 }
12171
12172 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12173 dev_err(&pdev->dev,
12174 "Cannot find PCI device base address, aborting\n");
12175 rc = -ENODEV;
12176 goto init_err_disable;
12177 }
12178
12179 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12180 if (rc) {
12181 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12182 goto init_err_disable;
12183 }
12184
12185 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12186 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12187 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12188 rc = -EIO;
12189 goto init_err_release;
12190 }
12191
12192 pci_set_master(pdev);
12193
12194 bp->dev = dev;
12195 bp->pdev = pdev;
12196
12197 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12198 * determines the BAR size.
12199 */
12200 bp->bar0 = pci_ioremap_bar(pdev, 0);
12201 if (!bp->bar0) {
12202 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12203 rc = -ENOMEM;
12204 goto init_err_release;
12205 }
12206
12207 bp->bar2 = pci_ioremap_bar(pdev, 4);
12208 if (!bp->bar2) {
12209 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12210 rc = -ENOMEM;
12211 goto init_err_release;
12212 }
12213
12214 pci_enable_pcie_error_reporting(pdev);
12215
12216 INIT_WORK(&bp->sp_task, bnxt_sp_task);
12217 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12218
12219 spin_lock_init(&bp->ntp_fltr_lock);
12220 #if BITS_PER_LONG == 32
12221 spin_lock_init(&bp->db_lock);
12222 #endif
12223
12224 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12225 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12226
12227 bnxt_init_dflt_coal(bp);
12228
12229 timer_setup(&bp->timer, bnxt_timer, 0);
12230 bp->current_interval = BNXT_TIMER_INTERVAL;
12231
12232 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12233 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12234
12235 clear_bit(BNXT_STATE_OPEN, &bp->state);
12236 return 0;
12237
12238 init_err_release:
12239 bnxt_unmap_bars(bp, pdev);
12240 pci_release_regions(pdev);
12241
12242 init_err_disable:
12243 pci_disable_device(pdev);
12244
12245 init_err:
12246 return rc;
12247 }
12248
12249 /* rtnl_lock held */
12250 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12251 {
12252 struct sockaddr *addr = p;
12253 struct bnxt *bp = netdev_priv(dev);
12254 int rc = 0;
12255
12256 if (!is_valid_ether_addr(addr->sa_data))
12257 return -EADDRNOTAVAIL;
12258
12259 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12260 return 0;
12261
12262 rc = bnxt_approve_mac(bp, addr->sa_data, true);
12263 if (rc)
12264 return rc;
12265
12266 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12267 if (netif_running(dev)) {
12268 bnxt_close_nic(bp, false, false);
12269 rc = bnxt_open_nic(bp, false, false);
12270 }
12271
12272 return rc;
12273 }
12274
12275 /* rtnl_lock held */
12276 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12277 {
12278 struct bnxt *bp = netdev_priv(dev);
12279
12280 if (netif_running(dev))
12281 bnxt_close_nic(bp, true, false);
12282
12283 dev->mtu = new_mtu;
12284 bnxt_set_ring_params(bp);
12285
12286 if (netif_running(dev))
12287 return bnxt_open_nic(bp, true, false);
12288
12289 return 0;
12290 }
12291
12292 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12293 {
12294 struct bnxt *bp = netdev_priv(dev);
12295 bool sh = false;
12296 int rc;
12297
12298 if (tc > bp->max_tc) {
12299 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12300 tc, bp->max_tc);
12301 return -EINVAL;
12302 }
12303
12304 if (netdev_get_num_tc(dev) == tc)
12305 return 0;
12306
12307 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12308 sh = true;
12309
12310 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12311 sh, tc, bp->tx_nr_rings_xdp);
12312 if (rc)
12313 return rc;
12314
12315 /* Need to close the device and re-allocate hw resources */
12316 if (netif_running(bp->dev))
12317 bnxt_close_nic(bp, true, false);
12318
12319 if (tc) {
12320 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12321 netdev_set_num_tc(dev, tc);
12322 } else {
12323 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12324 netdev_reset_tc(dev);
12325 }
12326 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12327 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12328 bp->tx_nr_rings + bp->rx_nr_rings;
12329
12330 if (netif_running(bp->dev))
12331 return bnxt_open_nic(bp, true, false);
12332
12333 return 0;
12334 }
12335
12336 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12337 void *cb_priv)
12338 {
12339 struct bnxt *bp = cb_priv;
12340
12341 if (!bnxt_tc_flower_enabled(bp) ||
12342 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12343 return -EOPNOTSUPP;
12344
12345 switch (type) {
12346 case TC_SETUP_CLSFLOWER:
12347 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12348 default:
12349 return -EOPNOTSUPP;
12350 }
12351 }
12352
12353 LIST_HEAD(bnxt_block_cb_list);
12354
12355 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12356 void *type_data)
12357 {
12358 struct bnxt *bp = netdev_priv(dev);
12359
12360 switch (type) {
12361 case TC_SETUP_BLOCK:
12362 return flow_block_cb_setup_simple(type_data,
12363 &bnxt_block_cb_list,
12364 bnxt_setup_tc_block_cb,
12365 bp, bp, true);
12366 case TC_SETUP_QDISC_MQPRIO: {
12367 struct tc_mqprio_qopt *mqprio = type_data;
12368
12369 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12370
12371 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12372 }
12373 default:
12374 return -EOPNOTSUPP;
12375 }
12376 }
12377
12378 #ifdef CONFIG_RFS_ACCEL
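/* Two ntuple filters match only if their L3 protocol, L4 protocol,
 * addresses, ports, flow flags and MAC addresses are all identical.
 */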
12379 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12380 struct bnxt_ntuple_filter *f2)
12381 {
12382 struct flow_keys *keys1 = &f1->fkeys;
12383 struct flow_keys *keys2 = &f2->fkeys;
12384
12385 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12386 keys1->basic.ip_proto != keys2->basic.ip_proto)
12387 return false;
12388
12389 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12390 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12391 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12392 return false;
12393 } else {
12394 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12395 sizeof(keys1->addrs.v6addrs.src)) ||
12396 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12397 sizeof(keys1->addrs.v6addrs.dst)))
12398 return false;
12399 }
12400
12401 if (keys1->ports.ports == keys2->ports.ports &&
12402 keys1->control.flags == keys2->control.flags &&
12403 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12404 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12405 return true;
12406
12407 return false;
12408 }
12409
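/* aRFS .ndo_rx_flow_steer callback: dissect the skb, reject unsupported
 * protocols, and if no identical filter exists yet, allocate a new ntuple
 * filter and queue the slow-path work that programs it into hardware.
 */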
12410 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12411 u16 rxq_index, u32 flow_id)
12412 {
12413 struct bnxt *bp = netdev_priv(dev);
12414 struct bnxt_ntuple_filter *fltr, *new_fltr;
12415 struct flow_keys *fkeys;
12416 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12417 int rc = 0, idx, bit_id, l2_idx = 0;
12418 struct hlist_head *head;
12419 u32 flags;
12420
12421 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12422 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12423 int off = 0, j;
12424
12425 netif_addr_lock_bh(dev);
12426 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12427 if (ether_addr_equal(eth->h_dest,
12428 vnic->uc_list + off)) {
12429 l2_idx = j + 1;
12430 break;
12431 }
12432 }
12433 netif_addr_unlock_bh(dev);
12434 if (!l2_idx)
12435 return -EINVAL;
12436 }
12437 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12438 if (!new_fltr)
12439 return -ENOMEM;
12440
12441 fkeys = &new_fltr->fkeys;
12442 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12443 rc = -EPROTONOSUPPORT;
12444 goto err_free;
12445 }
12446
12447 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12448 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12449 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12450 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12451 rc = -EPROTONOSUPPORT;
12452 goto err_free;
12453 }
12454 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12455 bp->hwrm_spec_code < 0x10601) {
12456 rc = -EPROTONOSUPPORT;
12457 goto err_free;
12458 }
12459 flags = fkeys->control.flags;
12460 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12461 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12462 rc = -EPROTONOSUPPORT;
12463 goto err_free;
12464 }
12465
12466 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12467 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12468
12469 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12470 head = &bp->ntp_fltr_hash_tbl[idx];
12471 rcu_read_lock();
12472 hlist_for_each_entry_rcu(fltr, head, hash) {
12473 if (bnxt_fltr_match(fltr, new_fltr)) {
12474 rcu_read_unlock();
12475 rc = 0;
12476 goto err_free;
12477 }
12478 }
12479 rcu_read_unlock();
12480
12481 spin_lock_bh(&bp->ntp_fltr_lock);
12482 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12483 BNXT_NTP_FLTR_MAX_FLTR, 0);
12484 if (bit_id < 0) {
12485 spin_unlock_bh(&bp->ntp_fltr_lock);
12486 rc = -ENOMEM;
12487 goto err_free;
12488 }
12489
12490 new_fltr->sw_id = (u16)bit_id;
12491 new_fltr->flow_id = flow_id;
12492 new_fltr->l2_fltr_idx = l2_idx;
12493 new_fltr->rxq = rxq_index;
12494 hlist_add_head_rcu(&new_fltr->hash, head);
12495 bp->ntp_fltr_count++;
12496 spin_unlock_bh(&bp->ntp_fltr_lock);
12497
12498 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12499 bnxt_queue_sp_work(bp);
12500
12501 return new_fltr->sw_id;
12502
12503 err_free:
12504 kfree(new_fltr);
12505 return rc;
12506 }
12507
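/* Walk the ntuple filter hash table: program filters not yet accepted by
 * firmware and free filters whose RPS flows have expired.
 */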
12508 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12509 {
12510 int i;
12511
12512 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12513 struct hlist_head *head;
12514 struct hlist_node *tmp;
12515 struct bnxt_ntuple_filter *fltr;
12516 int rc;
12517
12518 head = &bp->ntp_fltr_hash_tbl[i];
12519 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12520 bool del = false;
12521
12522 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12523 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12524 fltr->flow_id,
12525 fltr->sw_id)) {
12526 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12527 fltr);
12528 del = true;
12529 }
12530 } else {
12531 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12532 fltr);
12533 if (rc)
12534 del = true;
12535 else
12536 set_bit(BNXT_FLTR_VALID, &fltr->state);
12537 }
12538
12539 if (del) {
12540 spin_lock_bh(&bp->ntp_fltr_lock);
12541 hlist_del_rcu(&fltr->hash);
12542 bp->ntp_fltr_count--;
12543 spin_unlock_bh(&bp->ntp_fltr_lock);
12544 synchronize_rcu();
12545 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12546 kfree(fltr);
12547 }
12548 }
12549 }
12550 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12551 netdev_info(bp->dev, "Received PF driver unload event\n");
12552 }
12553
12554 #else
12555
12556 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12557 {
12558 }
12559
12560 #endif /* CONFIG_RFS_ACCEL */
12561
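/* udp_tunnel_nic sync callback.  Table 0 carries the VXLAN port and table 1
 * the GENEVE port; a non-zero port is programmed into firmware and a zero
 * port frees the previously programmed one.
 */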
12562 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12563 {
12564 struct bnxt *bp = netdev_priv(netdev);
12565 struct udp_tunnel_info ti;
12566 unsigned int cmd;
12567
12568 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12569 if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
12570 bp->vxlan_port = ti.port;
12571 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12572 } else {
12573 bp->nge_port = ti.port;
12574 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12575 }
12576
12577 if (ti.port)
12578 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12579
12580 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12581 }
12582
12583 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12584 .sync_table = bnxt_udp_tunnel_sync,
12585 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12586 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12587 .tables = {
12588 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
12589 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12590 },
12591 };
12592
12593 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12594 struct net_device *dev, u32 filter_mask,
12595 int nlflags)
12596 {
12597 struct bnxt *bp = netdev_priv(dev);
12598
12599 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12600 nlflags, filter_mask, NULL);
12601 }
12602
12603 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12604 u16 flags, struct netlink_ext_ack *extack)
12605 {
12606 struct bnxt *bp = netdev_priv(dev);
12607 struct nlattr *attr, *br_spec;
12608 int rem, rc = 0;
12609
12610 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12611 return -EOPNOTSUPP;
12612
12613 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12614 if (!br_spec)
12615 return -EINVAL;
12616
12617 nla_for_each_nested(attr, br_spec, rem) {
12618 u16 mode;
12619
12620 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12621 continue;
12622
12623 if (nla_len(attr) < sizeof(mode))
12624 return -EINVAL;
12625
12626 mode = nla_get_u16(attr);
12627 if (mode == bp->br_mode)
12628 break;
12629
12630 rc = bnxt_hwrm_set_br_mode(bp, mode);
12631 if (!rc)
12632 bp->br_mode = mode;
12633 break;
12634 }
12635 return rc;
12636 }
12637
12638 int bnxt_get_port_parent_id(struct net_device *dev,
12639 struct netdev_phys_item_id *ppid)
12640 {
12641 struct bnxt *bp = netdev_priv(dev);
12642
12643 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12644 return -EOPNOTSUPP;
12645
12646 /* The PF and its VF-reps only support the switchdev framework */
12647 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12648 return -EOPNOTSUPP;
12649
12650 ppid->id_len = sizeof(bp->dsn);
12651 memcpy(ppid->id, bp->dsn, ppid->id_len);
12652
12653 return 0;
12654 }
12655
12656 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12657 {
12658 struct bnxt *bp = netdev_priv(dev);
12659
12660 return &bp->dl_port;
12661 }
12662
12663 static const struct net_device_ops bnxt_netdev_ops = {
12664 .ndo_open = bnxt_open,
12665 .ndo_start_xmit = bnxt_start_xmit,
12666 .ndo_stop = bnxt_close,
12667 .ndo_get_stats64 = bnxt_get_stats64,
12668 .ndo_set_rx_mode = bnxt_set_rx_mode,
12669 .ndo_do_ioctl = bnxt_ioctl,
12670 .ndo_validate_addr = eth_validate_addr,
12671 .ndo_set_mac_address = bnxt_change_mac_addr,
12672 .ndo_change_mtu = bnxt_change_mtu,
12673 .ndo_fix_features = bnxt_fix_features,
12674 .ndo_set_features = bnxt_set_features,
12675 .ndo_features_check = bnxt_features_check,
12676 .ndo_tx_timeout = bnxt_tx_timeout,
12677 #ifdef CONFIG_BNXT_SRIOV
12678 .ndo_get_vf_config = bnxt_get_vf_config,
12679 .ndo_set_vf_mac = bnxt_set_vf_mac,
12680 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
12681 .ndo_set_vf_rate = bnxt_set_vf_bw,
12682 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
12683 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
12684 .ndo_set_vf_trust = bnxt_set_vf_trust,
12685 #endif
12686 .ndo_setup_tc = bnxt_setup_tc,
12687 #ifdef CONFIG_RFS_ACCEL
12688 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
12689 #endif
12690 .ndo_bpf = bnxt_xdp,
12691 .ndo_xdp_xmit = bnxt_xdp_xmit,
12692 .ndo_bridge_getlink = bnxt_bridge_getlink,
12693 .ndo_bridge_setlink = bnxt_bridge_setlink,
12694 .ndo_get_devlink_port = bnxt_get_devlink_port,
12695 };
12696
12697 static void bnxt_remove_one(struct pci_dev *pdev)
12698 {
12699 struct net_device *dev = pci_get_drvdata(pdev);
12700 struct bnxt *bp = netdev_priv(dev);
12701
12702 if (BNXT_PF(bp))
12703 bnxt_sriov_disable(bp);
12704
12705 if (BNXT_PF(bp))
12706 devlink_port_type_clear(&bp->dl_port);
12707
12708 pci_disable_pcie_error_reporting(pdev);
12709 unregister_netdev(dev);
12710 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12711 /* Flush any pending tasks */
12712 cancel_work_sync(&bp->sp_task);
12713 cancel_delayed_work_sync(&bp->fw_reset_task);
12714 bp->sp_event = 0;
12715
12716 bnxt_dl_fw_reporters_destroy(bp, true);
12717 bnxt_dl_unregister(bp);
12718 bnxt_shutdown_tc(bp);
12719
12720 bnxt_clear_int_mode(bp);
12721 bnxt_hwrm_func_drv_unrgtr(bp);
12722 bnxt_free_hwrm_resources(bp);
12723 bnxt_free_hwrm_short_cmd_req(bp);
12724 bnxt_ethtool_free(bp);
12725 bnxt_dcb_free(bp);
12726 kfree(bp->edev);
12727 bp->edev = NULL;
12728 kfree(bp->ptp_cfg);
12729 bp->ptp_cfg = NULL;
12730 kfree(bp->fw_health);
12731 bp->fw_health = NULL;
12732 bnxt_cleanup_pci(bp);
12733 bnxt_free_ctx_mem(bp);
12734 kfree(bp->ctx);
12735 bp->ctx = NULL;
12736 kfree(bp->rss_indir_tbl);
12737 bp->rss_indir_tbl = NULL;
12738 bnxt_free_port_stats(bp);
12739 free_netdev(dev);
12740 }
12741
12742 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12743 {
12744 int rc = 0;
12745 struct bnxt_link_info *link_info = &bp->link_info;
12746
12747 bp->phy_flags = 0;
12748 rc = bnxt_hwrm_phy_qcaps(bp);
12749 if (rc) {
12750 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12751 rc);
12752 return rc;
12753 }
12754 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12755 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12756 else
12757 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
12758 if (!fw_dflt)
12759 return 0;
12760
12761 rc = bnxt_update_link(bp, false);
12762 if (rc) {
12763 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12764 rc);
12765 return rc;
12766 }
12767
12768 /* Older firmware does not have supported_auto_speeds, so assume
12769 * that all supported speeds can be autonegotiated.
12770 */
12771 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12772 link_info->support_auto_speeds = link_info->support_speeds;
12773
12774 bnxt_init_ethtool_link_settings(bp);
12775 return 0;
12776 }
12777
12778 static int bnxt_get_max_irq(struct pci_dev *pdev)
12779 {
12780 u16 ctrl;
12781
12782 if (!pdev->msix_cap)
12783 return 1;
12784
12785 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12786 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12787 }
12788
12789 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12790 int *max_cp)
12791 {
12792 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12793 int max_ring_grps = 0, max_irq;
12794
12795 *max_tx = hw_resc->max_tx_rings;
12796 *max_rx = hw_resc->max_rx_rings;
12797 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12798 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12799 bnxt_get_ulp_msix_num(bp),
12800 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12801 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12802 *max_cp = min_t(int, *max_cp, max_irq);
12803 max_ring_grps = hw_resc->max_hw_ring_grps;
12804 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12805 *max_cp -= 1;
12806 *max_rx -= 2;
12807 }
12808 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12809 *max_rx >>= 1;
12810 if (bp->flags & BNXT_FLAG_CHIP_P5) {
12811 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12812 /* On P5 chips, the max_cp output param should be the number of available NQs */
12813 *max_cp = max_irq;
12814 }
12815 *max_rx = min_t(int, *max_rx, max_ring_grps);
12816 }
12817
12818 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12819 {
12820 int rx, tx, cp;
12821
12822 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12823 *max_rx = rx;
12824 *max_tx = tx;
12825 if (!rx || !tx || !cp)
12826 return -ENOMEM;
12827
12828 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12829 }
12830
12831 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12832 bool shared)
12833 {
12834 int rc;
12835
12836 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12837 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12838 /* Not enough rings, try disabling agg rings. */
12839 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12840 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12841 if (rc) {
12842 /* set BNXT_FLAG_AGG_RINGS back for consistency */
12843 bp->flags |= BNXT_FLAG_AGG_RINGS;
12844 return rc;
12845 }
12846 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12847 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12848 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12849 bnxt_set_ring_params(bp);
12850 }
12851
12852 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12853 int max_cp, max_stat, max_irq;
12854
12855 /* Reserve minimum resources for RoCE */
12856 max_cp = bnxt_get_max_func_cp_rings(bp);
12857 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12858 max_irq = bnxt_get_max_func_irqs(bp);
12859 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12860 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12861 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12862 return 0;
12863
12864 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12865 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12866 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12867 max_cp = min_t(int, max_cp, max_irq);
12868 max_cp = min_t(int, max_cp, max_stat);
12869 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12870 if (rc)
12871 rc = 0;
12872 }
12873 return rc;
12874 }
12875
12876 /* In the initial default shared-ring configuration, each shared ring must
12877 * have an RX/TX ring pair.
12878 */
12879 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12880 {
12881 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12882 bp->rx_nr_rings = bp->cp_nr_rings;
12883 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12884 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12885 }
12886
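/* Pick the default ring counts: start from the default RSS queue count
 * (1 in a kdump kernel), scale down on multi-port cards, clamp to what the
 * hardware supports and reserve the result, re-reserving once if the rings
 * were trimmed.
 */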
12887 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12888 {
12889 int dflt_rings, max_rx_rings, max_tx_rings, rc;
12890
12891 if (!bnxt_can_reserve_rings(bp))
12892 return 0;
12893
12894 if (sh)
12895 bp->flags |= BNXT_FLAG_SHARED_RINGS;
12896 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
12897 /* Reduce the default ring count on multi-port cards so that the total
12898 * number of default rings does not exceed the CPU count.
12899 */
12900 if (bp->port_count > 1) {
12901 int max_rings =
12902 max_t(int, num_online_cpus() / bp->port_count, 1);
12903
12904 dflt_rings = min_t(int, dflt_rings, max_rings);
12905 }
12906 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
12907 if (rc)
12908 return rc;
12909 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12910 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
12911 if (sh)
12912 bnxt_trim_dflt_sh_rings(bp);
12913 else
12914 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12915 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12916
12917 rc = __bnxt_reserve_rings(bp);
12918 if (rc)
12919 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
12920 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12921 if (sh)
12922 bnxt_trim_dflt_sh_rings(bp);
12923
12924 /* Rings may have been trimmed, re-reserve the trimmed rings. */
12925 if (bnxt_need_reserve_rings(bp)) {
12926 rc = __bnxt_reserve_rings(bp);
12927 if (rc)
12928 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
12929 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12930 }
12931 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12932 bp->rx_nr_rings++;
12933 bp->cp_nr_rings++;
12934 }
12935 if (rc) {
12936 bp->tx_nr_rings = 0;
12937 bp->rx_nr_rings = 0;
12938 }
12939 return rc;
12940 }
12941
12942 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
12943 {
12944 int rc;
12945
12946 if (bp->tx_nr_rings)
12947 return 0;
12948
12949 bnxt_ulp_irq_stop(bp);
12950 bnxt_clear_int_mode(bp);
12951 rc = bnxt_set_dflt_rings(bp, true);
12952 if (rc) {
12953 netdev_err(bp->dev, "Not enough rings available.\n");
12954 goto init_dflt_ring_err;
12955 }
12956 rc = bnxt_init_int_mode(bp);
12957 if (rc)
12958 goto init_dflt_ring_err;
12959
12960 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12961 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
12962 bp->flags |= BNXT_FLAG_RFS;
12963 bp->dev->features |= NETIF_F_NTUPLE;
12964 }
12965 init_dflt_ring_err:
12966 bnxt_ulp_irq_restart(bp, rc);
12967 return rc;
12968 }
12969
12970 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
12971 {
12972 int rc;
12973
12974 ASSERT_RTNL();
12975 bnxt_hwrm_func_qcaps(bp);
12976
12977 if (netif_running(bp->dev))
12978 __bnxt_close_nic(bp, true, false);
12979
12980 bnxt_ulp_irq_stop(bp);
12981 bnxt_clear_int_mode(bp);
12982 rc = bnxt_init_int_mode(bp);
12983 bnxt_ulp_irq_restart(bp, rc);
12984
12985 if (netif_running(bp->dev)) {
12986 if (rc)
12987 dev_close(bp->dev);
12988 else
12989 rc = bnxt_open_nic(bp, true, false);
12990 }
12991
12992 return rc;
12993 }
12994
12995 static int bnxt_init_mac_addr(struct bnxt *bp)
12996 {
12997 int rc = 0;
12998
12999 if (BNXT_PF(bp)) {
13000 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
13001 } else {
13002 #ifdef CONFIG_BNXT_SRIOV
13003 struct bnxt_vf_info *vf = &bp->vf;
13004 bool strict_approval = true;
13005
13006 if (is_valid_ether_addr(vf->mac_addr)) {
13007 /* overwrite netdev dev_addr with admin VF MAC */
13008 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
13009 /* An older PF driver or firmware may not approve this
13010 * correctly.
13011 */
13012 strict_approval = false;
13013 } else {
13014 eth_hw_addr_random(bp->dev);
13015 }
13016 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13017 #endif
13018 }
13019 return rc;
13020 }
13021
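/* Read the PCI VPD read-only section and copy the part number (PN) and
 * serial number (SN) keywords into bp->board_partno and
 * bp->board_serialno, truncating each to BNXT_VPD_FLD_LEN - 1 bytes.
 */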
13022 #define BNXT_VPD_LEN 512
13023 static void bnxt_vpd_read_info(struct bnxt *bp)
13024 {
13025 struct pci_dev *pdev = bp->pdev;
13026 int i, len, pos, ro_size, size;
13027 ssize_t vpd_size;
13028 u8 *vpd_data;
13029
13030 vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
13031 if (!vpd_data)
13032 return;
13033
13034 vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
13035 if (vpd_size <= 0) {
13036 netdev_err(bp->dev, "Unable to read VPD\n");
13037 goto exit;
13038 }
13039
13040 i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
13041 if (i < 0) {
13042 netdev_err(bp->dev, "VPD READ-Only not found\n");
13043 goto exit;
13044 }
13045
13046 ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
13047 i += PCI_VPD_LRDT_TAG_SIZE;
13048 if (i + ro_size > vpd_size)
13049 goto exit;
13050
13051 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13052 PCI_VPD_RO_KEYWORD_PARTNO);
13053 if (pos < 0)
13054 goto read_sn;
13055
13056 len = pci_vpd_info_field_size(&vpd_data[pos]);
13057 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13058 if (len + pos > vpd_size)
13059 goto read_sn;
13060
13061 size = min(len, BNXT_VPD_FLD_LEN - 1);
13062 memcpy(bp->board_partno, &vpd_data[pos], size);
13063
13064 read_sn:
13065 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13066 PCI_VPD_RO_KEYWORD_SERIALNO);
13067 if (pos < 0)
13068 goto exit;
13069
13070 len = pci_vpd_info_field_size(&vpd_data[pos]);
13071 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13072 if (len + pos > vpd_size)
13073 goto exit;
13074
13075 size = min(len, BNXT_VPD_FLD_LEN - 1);
13076 memcpy(bp->board_serialno, &vpd_data[pos], size);
13077 exit:
13078 kfree(vpd_data);
13079 }
13080
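/* Read the PCIe Device Serial Number capability via pci_get_dsn() and
 * store it little-endian in dsn[]; sets BNXT_FLAG_DSN_VALID on success.
 */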
13081 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13082 {
13083 struct pci_dev *pdev = bp->pdev;
13084 u64 qword;
13085
13086 qword = pci_get_dsn(pdev);
13087 if (!qword) {
13088 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13089 return -EOPNOTSUPP;
13090 }
13091
13092 put_unaligned_le64(qword, dsn);
13093
13094 bp->flags |= BNXT_FLAG_DSN_VALID;
13095 return 0;
13096 }
13097
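/* Map the doorbell BAR (BAR 2) using the firmware-provided db_size. */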
13098 static int bnxt_map_db_bar(struct bnxt *bp)
13099 {
13100 if (!bp->db_size)
13101 return -ENODEV;
13102 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13103 if (!bp->bar1)
13104 return -ENOMEM;
13105 return 0;
13106 }
13107
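/* PCI probe entry point: allocate the netdev, map BARs and set up HWRM
 * communication, query firmware capabilities, configure offload
 * features and default rings, initialize the interrupt mode, and
 * finally register the netdev and devlink port.
 */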
13108 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13109 {
13110 struct net_device *dev;
13111 struct bnxt *bp;
13112 int rc, max_irqs;
13113
13114 if (pci_is_bridge(pdev))
13115 return -ENODEV;
13116
13117 /* Clear any DMA transactions left pending by the crashed kernel
13118 * while loading the driver in the kdump capture kernel.
13119 */
13120 if (is_kdump_kernel()) {
13121 pci_clear_master(pdev);
13122 pcie_flr(pdev);
13123 }
13124
13125 max_irqs = bnxt_get_max_irq(pdev);
13126 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13127 if (!dev)
13128 return -ENOMEM;
13129
13130 bp = netdev_priv(dev);
13131 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13132 bnxt_set_max_func_irqs(bp, max_irqs);
13133
13134 if (bnxt_vf_pciid(ent->driver_data))
13135 bp->flags |= BNXT_FLAG_VF;
13136
13137 if (pdev->msix_cap)
13138 bp->flags |= BNXT_FLAG_MSIX_CAP;
13139
13140 rc = bnxt_init_board(pdev, dev);
13141 if (rc < 0)
13142 goto init_err_free;
13143
13144 dev->netdev_ops = &bnxt_netdev_ops;
13145 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13146 dev->ethtool_ops = &bnxt_ethtool_ops;
13147 pci_set_drvdata(pdev, dev);
13148
13149 rc = bnxt_alloc_hwrm_resources(bp);
13150 if (rc)
13151 goto init_err_pci_clean;
13152
13153 mutex_init(&bp->hwrm_cmd_lock);
13154 mutex_init(&bp->link_lock);
13155
13156 rc = bnxt_fw_init_one_p1(bp);
13157 if (rc)
13158 goto init_err_pci_clean;
13159
13160 if (BNXT_PF(bp))
13161 bnxt_vpd_read_info(bp);
13162
13163 if (BNXT_CHIP_P5(bp)) {
13164 bp->flags |= BNXT_FLAG_CHIP_P5;
13165 if (BNXT_CHIP_SR2(bp))
13166 bp->flags |= BNXT_FLAG_CHIP_SR2;
13167 }
13168
13169 rc = bnxt_alloc_rss_indir_tbl(bp);
13170 if (rc)
13171 goto init_err_pci_clean;
13172
13173 rc = bnxt_fw_init_one_p2(bp);
13174 if (rc)
13175 goto init_err_pci_clean;
13176
13177 rc = bnxt_map_db_bar(bp);
13178 if (rc) {
13179 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13180 rc);
13181 goto init_err_pci_clean;
13182 }
13183
13184 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13185 NETIF_F_TSO | NETIF_F_TSO6 |
13186 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13187 NETIF_F_GSO_IPXIP4 |
13188 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13189 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13190 NETIF_F_RXCSUM | NETIF_F_GRO;
13191
13192 if (BNXT_SUPPORTS_TPA(bp))
13193 dev->hw_features |= NETIF_F_LRO;
13194
13195 dev->hw_enc_features =
13196 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13197 NETIF_F_TSO | NETIF_F_TSO6 |
13198 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13199 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13200 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13201 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13202
13203 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13204 NETIF_F_GSO_GRE_CSUM;
13205 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13206 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13207 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13208 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13209 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13210 if (BNXT_SUPPORTS_TPA(bp))
13211 dev->hw_features |= NETIF_F_GRO_HW;
13212 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13213 if (dev->features & NETIF_F_GRO_HW)
13214 dev->features &= ~NETIF_F_LRO;
13215 dev->priv_flags |= IFF_UNICAST_FLT;
13216
13217 #ifdef CONFIG_BNXT_SRIOV
13218 init_waitqueue_head(&bp->sriov_cfg_wait);
13219 mutex_init(&bp->sriov_lock);
13220 #endif
13221 if (BNXT_SUPPORTS_TPA(bp)) {
13222 bp->gro_func = bnxt_gro_func_5730x;
13223 if (BNXT_CHIP_P4(bp))
13224 bp->gro_func = bnxt_gro_func_5731x;
13225 else if (BNXT_CHIP_P5(bp))
13226 bp->gro_func = bnxt_gro_func_5750x;
13227 }
13228 if (!BNXT_CHIP_P4_PLUS(bp))
13229 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13230
13231 rc = bnxt_init_mac_addr(bp);
13232 if (rc) {
13233 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13234 rc = -EADDRNOTAVAIL;
13235 goto init_err_pci_clean;
13236 }
13237
13238 if (BNXT_PF(bp)) {
13239 /* Read the adapter's DSN to use as the eswitch switch_id */
13240 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13241 }
13242
13243 /* MTU range: 60 - FW defined max */
13244 dev->min_mtu = ETH_ZLEN;
13245 dev->max_mtu = bp->max_mtu;
13246
13247 rc = bnxt_probe_phy(bp, true);
13248 if (rc)
13249 goto init_err_pci_clean;
13250
13251 bnxt_set_rx_skb_mode(bp, false);
13252 bnxt_set_tpa_flags(bp);
13253 bnxt_set_ring_params(bp);
13254 rc = bnxt_set_dflt_rings(bp, true);
13255 if (rc) {
13256 netdev_err(bp->dev, "Not enough rings available.\n");
13257 rc = -ENOMEM;
13258 goto init_err_pci_clean;
13259 }
13260
13261 bnxt_fw_init_one_p3(bp);
13262
13263 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13264 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13265
13266 rc = bnxt_init_int_mode(bp);
13267 if (rc)
13268 goto init_err_pci_clean;
13269
13270 /* No TC has been set yet and rings may have been trimmed due to
13271 * limited MSIX, so we re-initialize the TX rings per TC.
13272 */
13273 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13274
13275 if (BNXT_PF(bp)) {
13276 if (!bnxt_pf_wq) {
13277 bnxt_pf_wq =
13278 create_singlethread_workqueue("bnxt_pf_wq");
13279 if (!bnxt_pf_wq) {
13280 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13281 rc = -ENOMEM;
13282 goto init_err_pci_clean;
13283 }
13284 }
13285 rc = bnxt_init_tc(bp);
13286 if (rc)
13287 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13288 rc);
13289 }
13290
13291 bnxt_inv_fw_health_reg(bp);
13292 bnxt_dl_register(bp);
13293
13294 rc = register_netdev(dev);
13295 if (rc)
13296 goto init_err_cleanup;
13297
13298 if (BNXT_PF(bp))
13299 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13300 bnxt_dl_fw_reporters_create(bp);
13301
13302 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13303 board_info[ent->driver_data].name,
13304 (long)pci_resource_start(pdev, 0), dev->dev_addr);
13305 pcie_print_link_status(pdev);
13306
13307 pci_save_state(pdev);
13308 return 0;
13309
13310 init_err_cleanup:
13311 bnxt_dl_unregister(bp);
13312 bnxt_shutdown_tc(bp);
13313 bnxt_clear_int_mode(bp);
13314
13315 init_err_pci_clean:
13316 bnxt_hwrm_func_drv_unrgtr(bp);
13317 bnxt_free_hwrm_short_cmd_req(bp);
13318 bnxt_free_hwrm_resources(bp);
13319 bnxt_ethtool_free(bp);
13320 kfree(bp->ptp_cfg);
13321 bp->ptp_cfg = NULL;
13322 kfree(bp->fw_health);
13323 bp->fw_health = NULL;
13324 bnxt_cleanup_pci(bp);
13325 bnxt_free_ctx_mem(bp);
13326 kfree(bp->ctx);
13327 bp->ctx = NULL;
13328 kfree(bp->rss_indir_tbl);
13329 bp->rss_indir_tbl = NULL;
13330
13331 init_err_free:
13332 free_netdev(dev);
13333 return rc;
13334 }
13335
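/* Shutdown handler: close the netdev if running, quiesce ULPs and
 * interrupts, disable the PCI device, and arm wake-on-LAN / enter D3hot
 * when the system is powering off.
 */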
13336 static void bnxt_shutdown(struct pci_dev *pdev)
13337 {
13338 struct net_device *dev = pci_get_drvdata(pdev);
13339 struct bnxt *bp;
13340
13341 if (!dev)
13342 return;
13343
13344 rtnl_lock();
13345 bp = netdev_priv(dev);
13346 if (!bp)
13347 goto shutdown_exit;
13348
13349 if (netif_running(dev))
13350 dev_close(dev);
13351
13352 bnxt_ulp_shutdown(bp);
13353 bnxt_clear_int_mode(bp);
13354 pci_disable_device(pdev);
13355
13356 if (system_state == SYSTEM_POWER_OFF) {
13357 pci_wake_from_d3(pdev, bp->wol);
13358 pci_set_power_state(pdev, PCI_D3hot);
13359 }
13360
13361 shutdown_exit:
13362 rtnl_unlock();
13363 }
13364
13365 #ifdef CONFIG_PM_SLEEP
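/* System suspend: stop ULPs, close the netdev, unregister the driver
 * from firmware, disable the PCI device and free context memory.
 * bnxt_resume() below reverses these steps and re-registers with
 * firmware before reopening the netdev.
 */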
13366 static int bnxt_suspend(struct device *device)
13367 {
13368 struct net_device *dev = dev_get_drvdata(device);
13369 struct bnxt *bp = netdev_priv(dev);
13370 int rc = 0;
13371
13372 rtnl_lock();
13373 bnxt_ulp_stop(bp);
13374 if (netif_running(dev)) {
13375 netif_device_detach(dev);
13376 rc = bnxt_close(dev);
13377 }
13378 bnxt_hwrm_func_drv_unrgtr(bp);
13379 pci_disable_device(bp->pdev);
13380 bnxt_free_ctx_mem(bp);
13381 kfree(bp->ctx);
13382 bp->ctx = NULL;
13383 rtnl_unlock();
13384 return rc;
13385 }
13386
13387 static int bnxt_resume(struct device *device)
13388 {
13389 struct net_device *dev = dev_get_drvdata(device);
13390 struct bnxt *bp = netdev_priv(dev);
13391 int rc = 0;
13392
13393 rtnl_lock();
13394 rc = pci_enable_device(bp->pdev);
13395 if (rc) {
13396 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13397 rc);
13398 goto resume_exit;
13399 }
13400 pci_set_master(bp->pdev);
13401 if (bnxt_hwrm_ver_get(bp)) {
13402 rc = -ENODEV;
13403 goto resume_exit;
13404 }
13405 rc = bnxt_hwrm_func_reset(bp);
13406 if (rc) {
13407 rc = -EBUSY;
13408 goto resume_exit;
13409 }
13410
13411 rc = bnxt_hwrm_func_qcaps(bp);
13412 if (rc)
13413 goto resume_exit;
13414
13415 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13416 rc = -ENODEV;
13417 goto resume_exit;
13418 }
13419
13420 bnxt_get_wol_settings(bp);
13421 if (netif_running(dev)) {
13422 rc = bnxt_open(dev);
13423 if (!rc)
13424 netif_device_attach(dev);
13425 }
13426
13427 resume_exit:
13428 bnxt_ulp_start(bp, rc);
13429 if (!rc)
13430 bnxt_reenable_sriov(bp);
13431 rtnl_unlock();
13432 return rc;
13433 }
13434
13435 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13436 #define BNXT_PM_OPS (&bnxt_pm_ops)
13437
13438 #else
13439
13440 #define BNXT_PM_OPS NULL
13441
13442 #endif /* CONFIG_PM_SLEEP */
13443
13444 /**
13445 * bnxt_io_error_detected - called when PCI error is detected
13446 * @pdev: Pointer to PCI device
13447 * @state: The current pci connection state
13448 *
13449 * This function is called after a PCI bus error affecting
13450 * this device has been detected.
13451 */
13452 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13453 pci_channel_state_t state)
13454 {
13455 struct net_device *netdev = pci_get_drvdata(pdev);
13456 struct bnxt *bp = netdev_priv(netdev);
13457
13458 netdev_info(netdev, "PCI I/O error detected\n");
13459
13460 rtnl_lock();
13461 netif_device_detach(netdev);
13462
13463 bnxt_ulp_stop(bp);
13464
13465 if (state == pci_channel_io_perm_failure) {
13466 rtnl_unlock();
13467 return PCI_ERS_RESULT_DISCONNECT;
13468 }
13469
13470 if (state == pci_channel_io_frozen)
13471 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13472
13473 if (netif_running(netdev))
13474 bnxt_close(netdev);
13475
13476 if (pci_is_enabled(pdev))
13477 pci_disable_device(pdev);
13478 bnxt_free_ctx_mem(bp);
13479 kfree(bp->ctx);
13480 bp->ctx = NULL;
13481 rtnl_unlock();
13482
13483 /* Request a slot reset. */
13484 return PCI_ERS_RESULT_NEED_RESET;
13485 }
13486
13487 /**
13488 * bnxt_io_slot_reset - called after the pci bus has been reset.
13489 * @pdev: Pointer to PCI device
13490 *
13491 * Restart the card from scratch, as if from a cold-boot.
13492 * At this point, the card has experienced a hard reset,
13493 * followed by fixups by the BIOS, and has its config space
13494 * set up identically to what it was at cold boot.
13495 */
13496 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13497 {
13498 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13499 struct net_device *netdev = pci_get_drvdata(pdev);
13500 struct bnxt *bp = netdev_priv(netdev);
13501 int err = 0, off;
13502
13503 netdev_info(bp->dev, "PCI Slot Reset\n");
13504
13505 rtnl_lock();
13506
13507 if (pci_enable_device(pdev)) {
13508 dev_err(&pdev->dev,
13509 "Cannot re-enable PCI device after reset.\n");
13510 } else {
13511 pci_set_master(pdev);
13512 /* Upon a fatal error, the device's internal logic that latches the
13513 * BAR values is reset and is restored only when the BARs are
13514 * rewritten.
13515 *
13516 * Because pci_restore_state() does not rewrite a BAR whose value
13517 * matches the previously saved value, the driver writes the BARs
13518 * to 0 first to force a full restore after a fatal error.
13519 */
13520 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13521 &bp->state)) {
13522 for (off = PCI_BASE_ADDRESS_0;
13523 off <= PCI_BASE_ADDRESS_5; off += 4)
13524 pci_write_config_dword(bp->pdev, off, 0);
13525 }
13526 pci_restore_state(pdev);
13527 pci_save_state(pdev);
13528
13529 err = bnxt_hwrm_func_reset(bp);
13530 if (!err)
13531 result = PCI_ERS_RESULT_RECOVERED;
13532 }
13533
13534 rtnl_unlock();
13535
13536 return result;
13537 }
13538
13539 /**
13540 * bnxt_io_resume - called when traffic can start flowing again.
13541 * @pdev: Pointer to PCI device
13542 *
13543 * This callback is called when the error recovery driver tells
13544 * us that it's OK to resume normal operation.
13545 */
13546 static void bnxt_io_resume(struct pci_dev *pdev)
13547 {
13548 struct net_device *netdev = pci_get_drvdata(pdev);
13549 struct bnxt *bp = netdev_priv(netdev);
13550 int err;
13551
13552 netdev_info(bp->dev, "PCI Slot Resume\n");
13553 rtnl_lock();
13554
13555 err = bnxt_hwrm_func_qcaps(bp);
13556 if (!err && netif_running(netdev))
13557 err = bnxt_open(netdev);
13558
13559 bnxt_ulp_start(bp, err);
13560 if (!err) {
13561 bnxt_reenable_sriov(bp);
13562 netif_device_attach(netdev);
13563 }
13564
13565 rtnl_unlock();
13566 }
13567
13568 static const struct pci_error_handlers bnxt_err_handler = {
13569 .error_detected = bnxt_io_error_detected,
13570 .slot_reset = bnxt_io_slot_reset,
13571 .resume = bnxt_io_resume
13572 };
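/* On an uncorrectable PCI error, the AER core typically invokes these
 * callbacks in order: error_detected (quiesce and report whether a
 * reset is needed), slot_reset (after the link/slot reset, restore
 * config space and reset the firmware function), then resume (reopen
 * the netdev and restart ULPs once traffic may flow again).
 */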
13573
13574 static struct pci_driver bnxt_pci_driver = {
13575 .name = DRV_MODULE_NAME,
13576 .id_table = bnxt_pci_tbl,
13577 .probe = bnxt_init_one,
13578 .remove = bnxt_remove_one,
13579 .shutdown = bnxt_shutdown,
13580 .driver.pm = BNXT_PM_OPS,
13581 .err_handler = &bnxt_err_handler,
13582 #if defined(CONFIG_BNXT_SRIOV)
13583 .sriov_configure = bnxt_sriov_configure,
13584 #endif
13585 };
13586
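/* Module init/exit: register the debugfs hooks and the PCI driver on
 * load; on unload, unregister the driver, destroy the PF workqueue if
 * it was created, and tear down debugfs.
 */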
13587 static int __init bnxt_init(void)
13588 {
13589 bnxt_debug_init();
13590 return pci_register_driver(&bnxt_pci_driver);
13591 }
13592
13593 static void __exit bnxt_exit(void)
13594 {
13595 pci_unregister_driver(&bnxt_pci_driver);
13596 if (bnxt_pf_wq)
13597 destroy_workqueue(bnxt_pf_wq);
13598 bnxt_debug_exit();
13599 }
13600
13601 module_init(bnxt_init);
13602 module_exit(bnxt_exit);