/* drivers/net/ethernet/broadcom/bnxt/bnxt.c */
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM57504,
	BCM57502,
	BCM57508_NPAR,
	BCM57504_NPAR,
	BCM57502_NPAR,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
	NETXTREME_C_VF_HV,
	NETXTREME_E_VF_HV,
	NETXTREME_E_P5_VF,
	NETXTREME_E_P5_VF_HV,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
	writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

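/* Doorbell helpers.  On P5 (BNXT_FLAG_CHIP_P5) chips the NQ/CQ doorbells are
 * 64-bit writes carrying db_key64 plus the ring type; older chips use the
 * legacy 32-bit completion ring doorbells defined above.
 */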
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

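/* Stop the TX queue and re-check ring occupancy under the memory barrier
 * below; returns true if the queue stays stopped, false if enough
 * descriptors freed up in the meantime and the queue was woken again.
 */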
static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
					  struct bnxt_tx_ring_info *txr,
					  struct netdev_queue *txq)
{
	netif_tx_stop_queue(txq);

	/* netif_tx_stop_queue() must be done before checking
	 * tx index in bnxt_tx_avail() below, because in
	 * bnxt_tx_int(), we update tx index before checking for
	 * netif_tx_queue_stopped().
	 */
	smp_mb();
	if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
		netif_tx_wake_queue(txq);
		return false;
	}

	return true;
}

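/* Main transmit entry point.  Small packets that fit within
 * bp->tx_push_thresh on an otherwise empty ring are written directly
 * through the push doorbell; everything else is DMA-mapped and posted as
 * long TX BDs, with LSO/checksum/VLAN/PTP flags set in the second BD.
 */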
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
			return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
					    &ptp->tx_hdr_off)) {
				if (vlan_tag_flags)
					ptp->tx_hdr_off += VLAN_HLEN;
				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			} else {
				atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}
	}

	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
	    !lflags) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
		cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		bnxt_txr_netif_try_stop_queue(bp, txr, txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	if (BNXT_TX_PTP_IS_SET(lflags))
		atomic_inc(&bp->ptp_cfg->tx_avail);

	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

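/* Reclaim up to nr_pkts completed TX packets: unmap their buffers, hand any
 * pending PTP timestamp completions off to the PTP code, free the skbs and,
 * if the queue was stopped, wake it once enough descriptors are free.
 */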
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		bool compl_deferred = false;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			if (bp->flags & BNXT_FLAG_CHIP_P5) {
				if (!bnxt_get_tx_ts_p5(bp, skb))
					compl_deferred = true;
				else
					atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		if (!compl_deferred)
			dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
	    READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
		netif_tx_wake_queue(txq);
}

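/* RX buffer allocation.  In page mode (used for XDP) receive buffers come
 * from the per-ring page_pool; otherwise they are kmalloc'ed data buffers
 * mapped for DMA at bp->rx_dma_offset.  bnxt_alloc_rx_data() picks the
 * right allocator and fills in the RX BD.
 */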
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

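/* Post one aggregation-ring buffer.  When the system page size is larger
 * than BNXT_RX_PAGE_SIZE, a single page is carved into several agg buffers
 * and reference counted; otherwise each buffer gets its own page.
 */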
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

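/* Recycle agg completion buffers back onto the aggregation ring, e.g. when
 * an RX packet or TPA burst has to be dropped; the pages are reposted at
 * the current producer index instead of being freed.
 */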
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

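/* Build an skb for a page-mode buffer: the packet header (length reported
 * by hardware or estimated with eth_get_headlen()) is copied into the skb's
 * linear area and the remainder of the page is attached as the first frag.
 */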
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);
	page_pool_release_page(rxr->page_pool, page);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 idx,
				     u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

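/* Handle a TPA_START completion: park the current RX buffer in the
 * per-aggregation tpa_info (along with hash, metadata and cfa_code) and
 * give the ring slot a replacement buffer so the hardware can keep
 * receiving while the aggregation is in progress.
 */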
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif

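/* Per-chip GRO fixup helpers (installed as bp->gro_func).  Hardware
 * GRO/TPA delivers a merged packet; these routines rebuild the network and
 * transport header offsets and the TCP pseudo checksum so the stack can
 * complete it as a GRO packet.
 */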
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}

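/* Handle a TPA_END completion: collect the aggregation buffers, build the
 * final skb (copybreak for small packets, otherwise a buffer flip), apply
 * VLAN/RSS/checksum metadata and finish GRO before delivery.
 */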
c0c050c5 1564static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
e44758b7 1565 struct bnxt_cp_ring_info *cpr,
c0c050c5
MC
1566 u32 *raw_cons,
1567 struct rx_tpa_end_cmp *tpa_end,
1568 struct rx_tpa_end_cmp_ext *tpa_end1,
4e5dbbda 1569 u8 *event)
c0c050c5 1570{
e44758b7 1571 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1572 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6bb19474 1573 u8 *data_ptr, agg_bufs;
c0c050c5
MC
1574 unsigned int len;
1575 struct bnxt_tpa_info *tpa_info;
1576 dma_addr_t mapping;
1577 struct sk_buff *skb;
bfcd8d79 1578 u16 idx = 0, agg_id;
6bb19474 1579 void *data;
bfcd8d79 1580 bool gro;
c0c050c5 1581
fa7e2812 1582 if (unlikely(bnapi->in_reset)) {
e44758b7 1583 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
fa7e2812
MC
1584
1585 if (rc < 0)
1586 return ERR_PTR(-EBUSY);
1587 return NULL;
1588 }
1589
bfcd8d79
MC
1590 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1591 agg_id = TPA_END_AGG_ID_P5(tpa_end);
ec4d8e7c 1592 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
bfcd8d79
MC
1593 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1594 tpa_info = &rxr->rx_tpa[agg_id];
1595 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1596 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1597 agg_bufs, tpa_info->agg_count);
1598 agg_bufs = tpa_info->agg_count;
1599 }
1600 tpa_info->agg_count = 0;
1601 *event |= BNXT_AGG_EVENT;
ec4d8e7c 1602 bnxt_free_agg_idx(rxr, agg_id);
bfcd8d79
MC
1603 idx = agg_id;
1604 gro = !!(bp->flags & BNXT_FLAG_GRO);
1605 } else {
1606 agg_id = TPA_END_AGG_ID(tpa_end);
1607 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1608 tpa_info = &rxr->rx_tpa[agg_id];
1609 idx = RING_CMP(*raw_cons);
1610 if (agg_bufs) {
1611 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1612 return ERR_PTR(-EBUSY);
1613
1614 *event |= BNXT_AGG_EVENT;
1615 idx = NEXT_CMP(idx);
1616 }
1617 gro = !!TPA_END_GRO(tpa_end);
1618 }
c0c050c5 1619 data = tpa_info->data;
6bb19474
MC
1620 data_ptr = tpa_info->data_ptr;
1621 prefetch(data_ptr);
c0c050c5
MC
1622 len = tpa_info->len;
1623 mapping = tpa_info->mapping;
1624
69c149e2 1625 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
4a228a3a 1626 bnxt_abort_tpa(cpr, idx, agg_bufs);
69c149e2
MC
1627 if (agg_bufs > MAX_SKB_FRAGS)
1628 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1629 agg_bufs, (int)MAX_SKB_FRAGS);
c0c050c5
MC
1630 return NULL;
1631 }
1632
1633 if (len <= bp->rx_copy_thresh) {
6bb19474 1634 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
c0c050c5 1635 if (!skb) {
4a228a3a 1636 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1637 return NULL;
1638 }
1639 } else {
1640 u8 *new_data;
1641 dma_addr_t new_mapping;
1642
1643 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1644 if (!new_data) {
4a228a3a 1645 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1646 return NULL;
1647 }
1648
1649 tpa_info->data = new_data;
b3dba77c 1650 tpa_info->data_ptr = new_data + bp->rx_offset;
c0c050c5
MC
1651 tpa_info->mapping = new_mapping;
1652
1653 skb = build_skb(data, 0);
c519fe9a
SN
1654 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1655 bp->rx_buf_use_size, bp->rx_dir,
1656 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1657
1658 if (!skb) {
1659 kfree(data);
4a228a3a 1660 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1661 return NULL;
1662 }
b3dba77c 1663 skb_reserve(skb, bp->rx_offset);
c0c050c5
MC
1664 skb_put(skb, len);
1665 }
1666
1667 if (agg_bufs) {
4a228a3a 1668 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
c0c050c5
MC
1669 if (!skb) {
1670 /* Page reuse already handled by bnxt_rx_pages(). */
1671 return NULL;
1672 }
1673 }
ee5c7fb3
SP
1674
1675 skb->protocol =
1676 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
c0c050c5
MC
1677
1678 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1679 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1680
8852ddb4 1681 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
a196e96b 1682 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
96bdd4b9
MC
1683 __be16 vlan_proto = htons(tpa_info->metadata >>
1684 RX_CMP_FLAGS2_METADATA_TPID_SFT);
ed7bc602 1685 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
c0c050c5 1686
96bdd4b9
MC
1687 if (eth_type_vlan(vlan_proto)) {
1688 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1689 } else {
1690 dev_kfree_skb(skb);
1691 return NULL;
1692 }
c0c050c5
MC
1693 }
1694
1695 skb_checksum_none_assert(skb);
1696 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1697 skb->ip_summed = CHECKSUM_UNNECESSARY;
1698 skb->csum_level =
1699 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1700 }
1701
bfcd8d79 1702 if (gro)
309369c9 1703 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
c0c050c5
MC
1704
1705 return skb;
1706}
1707
8fe88ce7
MC
1708static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1709 struct rx_agg_cmp *rx_agg)
1710{
1711 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1712 struct bnxt_tpa_info *tpa_info;
1713
ec4d8e7c 1714 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
8fe88ce7
MC
1715 tpa_info = &rxr->rx_tpa[agg_id];
1716 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1717 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1718}
1719
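/* Note: each TPA aggregation completion (P5 chips) is only buffered here in
 * tpa_info->agg_arr[]; the entries are consumed later by bnxt_tpa_end(), which
 * cross-checks tpa_info->agg_count against the aggregation count reported in
 * the TPA_END completion before building the skb.
 */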
ee5c7fb3
SP
1720static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1721 struct sk_buff *skb)
1722{
1723 if (skb->dev != bp->dev) {
1724 /* this packet belongs to a vf-rep */
1725 bnxt_vf_rep_rx(bp, skb);
1726 return;
1727 }
1728 skb_record_rx_queue(skb, bnapi->index);
1729 napi_gro_receive(&bnapi->napi, skb);
1730}
1731
c0c050c5
MC
1732/* returns the following:
1733 * 1 - 1 packet successfully received
1734 * 0 - successful TPA_START, packet not completed yet
1735 * -EBUSY - completion ring does not have all the agg buffers yet
1736 * -ENOMEM - packet aborted due to out of memory
1737 * -EIO - packet aborted due to hw error indicated in BD
1738 */
e44758b7
MC
1739static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1740 u32 *raw_cons, u8 *event)
c0c050c5 1741{
e44758b7 1742 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1743 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1744 struct net_device *dev = bp->dev;
1745 struct rx_cmp *rxcmp;
1746 struct rx_cmp_ext *rxcmp1;
1747 u32 tmp_raw_cons = *raw_cons;
ee5c7fb3 1748 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
c0c050c5
MC
1749 struct bnxt_sw_rx_bd *rx_buf;
1750 unsigned int len;
6bb19474 1751 u8 *data_ptr, agg_bufs, cmp_type;
c0c050c5
MC
1752 dma_addr_t dma_addr;
1753 struct sk_buff *skb;
7f5515d1 1754 u32 flags, misc;
6bb19474 1755 void *data;
c0c050c5
MC
1756 int rc = 0;
1757
1758 rxcmp = (struct rx_cmp *)
1759 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1760
8fe88ce7
MC
1761 cmp_type = RX_CMP_TYPE(rxcmp);
1762
1763 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1764 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1765 goto next_rx_no_prod_no_len;
1766 }
1767
c0c050c5
MC
1768 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1769 cp_cons = RING_CMP(tmp_raw_cons);
1770 rxcmp1 = (struct rx_cmp_ext *)
1771 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1772
1773 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1774 return -EBUSY;
1775
c0c050c5
MC
1776 prod = rxr->rx_prod;
1777
1778 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1779 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1780 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1781
4e5dbbda 1782 *event |= BNXT_RX_EVENT;
e7e70fa6 1783 goto next_rx_no_prod_no_len;
c0c050c5
MC
1784
1785 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
e44758b7 1786 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
c0c050c5 1787 (struct rx_tpa_end_cmp *)rxcmp,
4e5dbbda 1788 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
c0c050c5 1789
1fac4b2f 1790 if (IS_ERR(skb))
c0c050c5
MC
1791 return -EBUSY;
1792
1793 rc = -ENOMEM;
1794 if (likely(skb)) {
ee5c7fb3 1795 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1796 rc = 1;
1797 }
4e5dbbda 1798 *event |= BNXT_RX_EVENT;
e7e70fa6 1799 goto next_rx_no_prod_no_len;
c0c050c5
MC
1800 }
1801
1802 cons = rxcmp->rx_cmp_opaque;
fa7e2812 1803 if (unlikely(cons != rxr->rx_next_cons)) {
bbd6f0a9 1804 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
fa7e2812 1805
1b5c8b63
MC
1806 /* 0xffff is a forced error, don't print it */
1807 if (rxr->rx_next_cons != 0xffff)
1808 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1809 cons, rxr->rx_next_cons);
fa7e2812 1810 bnxt_sched_reset(bp, rxr);
bbd6f0a9
MC
1811 if (rc1)
1812 return rc1;
1813 goto next_rx_no_prod_no_len;
fa7e2812 1814 }
a1b0e4e6
MC
1815 rx_buf = &rxr->rx_buf_ring[cons];
1816 data = rx_buf->data;
1817 data_ptr = rx_buf->data_ptr;
6bb19474 1818 prefetch(data_ptr);
c0c050c5 1819
c61fb99c
MC
1820 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1821 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
c0c050c5
MC
1822
1823 if (agg_bufs) {
1824 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1825 return -EBUSY;
1826
1827 cp_cons = NEXT_CMP(cp_cons);
4e5dbbda 1828 *event |= BNXT_AGG_EVENT;
c0c050c5 1829 }
4e5dbbda 1830 *event |= BNXT_RX_EVENT;
c0c050c5
MC
1831
1832 rx_buf->data = NULL;
1833 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
8e44e96c
MC
1834 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1835
c0c050c5
MC
1836 bnxt_reuse_rx_data(rxr, cons, data);
1837 if (agg_bufs)
4a228a3a
MC
1838 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1839 false);
c0c050c5
MC
1840
1841 rc = -EIO;
8e44e96c 1842 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
9d8b5f05 1843 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
8d4bd96b
MC
1844 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1845 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
8fbf58e1
MC
1846 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1847 rx_err);
19b3751f
MC
1848 bnxt_sched_reset(bp, rxr);
1849 }
8e44e96c 1850 }
0b397b17 1851 goto next_rx_no_len;
c0c050c5
MC
1852 }
1853
7f5515d1
PC
1854 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1855 len = flags >> RX_CMP_LEN_SHIFT;
11cd119d 1856 dma_addr = rx_buf->mapping;
c0c050c5 1857
c6d30e83
MC
1858 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1859 rc = 1;
1860 goto next_rx;
1861 }
1862
c0c050c5 1863 if (len <= bp->rx_copy_thresh) {
6bb19474 1864 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
c0c050c5
MC
1865 bnxt_reuse_rx_data(rxr, cons, data);
1866 if (!skb) {
296d5b54 1867 if (agg_bufs)
4a228a3a
MC
1868 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1869 agg_bufs, false);
c0c050c5
MC
1870 rc = -ENOMEM;
1871 goto next_rx;
1872 }
1873 } else {
c61fb99c
MC
1874 u32 payload;
1875
c6d30e83
MC
1876 if (rx_buf->data_ptr == data_ptr)
1877 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1878 else
1879 payload = 0;
6bb19474 1880 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
c61fb99c 1881 payload | len);
c0c050c5
MC
1882 if (!skb) {
1883 rc = -ENOMEM;
1884 goto next_rx;
1885 }
1886 }
1887
1888 if (agg_bufs) {
4a228a3a 1889 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
c0c050c5
MC
1890 if (!skb) {
1891 rc = -ENOMEM;
1892 goto next_rx;
1893 }
1894 }
1895
1896 if (RX_CMP_HASH_VALID(rxcmp)) {
1897 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1898 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1899
1900 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1901 if (hash_type != 1 && hash_type != 3)
1902 type = PKT_HASH_TYPE_L3;
1903 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1904 }
1905
ee5c7fb3
SP
1906 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1907 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
c0c050c5 1908
8852ddb4
MC
1909 if ((rxcmp1->rx_cmp_flags2 &
1910 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
a196e96b 1911 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
c0c050c5 1912 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
ed7bc602 1913 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
96bdd4b9
MC
1914 __be16 vlan_proto = htons(meta_data >>
1915 RX_CMP_FLAGS2_METADATA_TPID_SFT);
c0c050c5 1916
96bdd4b9
MC
1917 if (eth_type_vlan(vlan_proto)) {
1918 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1919 } else {
1920 dev_kfree_skb(skb);
1921 goto next_rx;
1922 }
c0c050c5
MC
1923 }
1924
1925 skb_checksum_none_assert(skb);
1926 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1927 if (dev->features & NETIF_F_RXCSUM) {
1928 skb->ip_summed = CHECKSUM_UNNECESSARY;
1929 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1930 }
1931 } else {
665e350d
SB
1932 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1933 if (dev->features & NETIF_F_RXCSUM)
9d8b5f05 1934 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
665e350d 1935 }
c0c050c5
MC
1936 }
1937
7f5515d1
PC
1938 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1939 RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1940 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1941 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1942 u64 ns, ts;
1943
1944 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1945 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1946
1947 spin_lock_bh(&ptp->ptp_lock);
1948 ns = timecounter_cyc2time(&ptp->tc, ts);
1949 spin_unlock_bh(&ptp->ptp_lock);
1950 memset(skb_hwtstamps(skb), 0,
1951 sizeof(*skb_hwtstamps(skb)));
1952 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1953 }
1954 }
1955 }
ee5c7fb3 1956 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1957 rc = 1;
1958
1959next_rx:
6a8788f2
AG
1960 cpr->rx_packets += 1;
1961 cpr->rx_bytes += len;
e7e70fa6 1962
0b397b17
MC
1963next_rx_no_len:
1964 rxr->rx_prod = NEXT_RX(prod);
1965 rxr->rx_next_cons = NEXT_RX(cons);
1966
e7e70fa6 1967next_rx_no_prod_no_len:
c0c050c5
MC
1968 *raw_cons = tmp_raw_cons;
1969
1970 return rc;
1971}
1972
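/* A rough sketch of how the return codes above are consumed: __bnxt_poll_work()
 * below counts successful (1) and -ENOMEM completions toward the NAPI budget
 * and stops on -EBUSY, since -EBUSY means the completion ring does not yet
 * hold all the entries for this packet.
 */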
2270bc5d
MC
1973/* In netpoll mode, if we are using a combined completion ring, we need to
1974 * discard the rx packets and recycle the buffers.
1975 */
e44758b7
MC
1976static int bnxt_force_rx_discard(struct bnxt *bp,
1977 struct bnxt_cp_ring_info *cpr,
2270bc5d
MC
1978 u32 *raw_cons, u8 *event)
1979{
2270bc5d
MC
1980 u32 tmp_raw_cons = *raw_cons;
1981 struct rx_cmp_ext *rxcmp1;
1982 struct rx_cmp *rxcmp;
1983 u16 cp_cons;
1984 u8 cmp_type;
1985
1986 cp_cons = RING_CMP(tmp_raw_cons);
1987 rxcmp = (struct rx_cmp *)
1988 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1989
1990 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1991 cp_cons = RING_CMP(tmp_raw_cons);
1992 rxcmp1 = (struct rx_cmp_ext *)
1993 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1994
1995 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1996 return -EBUSY;
1997
1998 cmp_type = RX_CMP_TYPE(rxcmp);
1999 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2000 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2001 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2002 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2003 struct rx_tpa_end_cmp_ext *tpa_end1;
2004
2005 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2006 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2007 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2008 }
e44758b7 2009 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
2270bc5d
MC
2010}
2011
7e914027
MC
2012u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2013{
2014 struct bnxt_fw_health *fw_health = bp->fw_health;
2015 u32 reg = fw_health->regs[reg_idx];
2016 u32 reg_type, reg_off, val = 0;
2017
2018 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2019 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2020 switch (reg_type) {
2021 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2022 pci_read_config_dword(bp->pdev, reg_off, &val);
2023 break;
2024 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2025 reg_off = fw_health->mapped_regs[reg_idx];
df561f66 2026 fallthrough;
7e914027
MC
2027 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2028 val = readl(bp->bar0 + reg_off);
2029 break;
2030 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2031 val = readl(bp->bar1 + reg_off);
2032 break;
2033 }
2034 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2035 val &= fw_health->fw_reset_inprog_reg_mask;
2036 return val;
2037}
2038
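/* Illustrative use (as in bnxt_async_event_process() below):
 *
 *	u32 hb = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
 *
 * The register descriptor in fw_health->regs[] encodes both the access type
 * (PCI config space, GRC window, BAR0 or BAR1) and the offset, so callers
 * only deal in logical register indices.
 */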
8d4bd96b
MC
2039static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2040{
2041 int i;
2042
2043 for (i = 0; i < bp->rx_nr_rings; i++) {
2044 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2045 struct bnxt_ring_grp_info *grp_info;
2046
2047 grp_info = &bp->grp_info[grp_idx];
2048 if (grp_info->agg_fw_ring_id == ring_id)
2049 return grp_idx;
2050 }
2051 return INVALID_HW_RING_ID;
2052}
2053
4bb13abf 2054#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
2055 ((data) & \
2056 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 2057
8d4bd96b
MC
2058#define BNXT_EVENT_RING_TYPE(data2) \
2059 ((data2) & \
2060 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2061
2062#define BNXT_EVENT_RING_TYPE_RX(data2) \
2063 (BNXT_EVENT_RING_TYPE(data2) == \
2064 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2065
c0c050c5
MC
2066static int bnxt_async_event_process(struct bnxt *bp,
2067 struct hwrm_async_event_cmpl *cmpl)
2068{
2069 u16 event_id = le16_to_cpu(cmpl->event_id);
03ab8ca1
MC
2070 u32 data1 = le32_to_cpu(cmpl->event_data1);
2071 u32 data2 = le32_to_cpu(cmpl->event_data2);
c0c050c5
MC
2072
2073 /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2074 switch (event_id) {
87c374de 2075 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
8cbde117
MC
2076 struct bnxt_link_info *link_info = &bp->link_info;
2077
2078 if (BNXT_VF(bp))
2079 goto async_event_process_exit;
a8168b6c
MC
2080
2081 /* print unsupported speed warning in forced speed mode only */
2082 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2083 (data1 & 0x20000)) {
8cbde117
MC
2084 u16 fw_speed = link_info->force_link_speed;
2085 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2086
a8168b6c
MC
2087 if (speed != SPEED_UNKNOWN)
2088 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2089 speed);
8cbde117 2090 }
286ef9d6 2091 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
8cbde117 2092 }
df561f66 2093 fallthrough;
b1613e78
MC
2094 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2095 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2096 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
df561f66 2097 fallthrough;
87c374de 2098 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
c0c050c5 2099 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
19241368 2100 break;
87c374de 2101 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
19241368 2102 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
c0c050c5 2103 break;
87c374de 2104 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
4bb13abf
MC
2105 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2106
2107 if (BNXT_VF(bp))
2108 break;
2109
2110 if (bp->pf.port_id != port_id)
2111 break;
2112
4bb13abf
MC
2113 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2114 break;
2115 }
87c374de 2116 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
fc0f1929
MC
2117 if (BNXT_PF(bp))
2118 goto async_event_process_exit;
2119 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2120 break;
5863b10a
MC
2121 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2122 char *fatal_str = "non-fatal";
2123
8280b38e
VV
2124 if (!bp->fw_health)
2125 goto async_event_process_exit;
2126
2151fe08
MC
2127 bp->fw_reset_timestamp = jiffies;
2128 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2129 if (!bp->fw_reset_min_dsecs)
2130 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2131 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2132 if (!bp->fw_reset_max_dsecs)
2133 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
acfb50e4 2134 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
5863b10a 2135 fatal_str = "fatal";
acfb50e4 2136 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
5863b10a 2137 }
871127e6
MC
2138 netif_warn(bp, hw, bp->dev,
2139 "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2140 fatal_str, data1, data2,
2141 bp->fw_reset_min_dsecs * 100,
2142 bp->fw_reset_max_dsecs * 100);
2151fe08
MC
2143 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2144 break;
5863b10a 2145 }
7e914027
MC
2146 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2147 struct bnxt_fw_health *fw_health = bp->fw_health;
7e914027
MC
2148
2149 if (!fw_health)
2150 goto async_event_process_exit;
2151
2152 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2153 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
f4d95c3c
MC
2154 if (!fw_health->enabled) {
2155 netif_info(bp, drv, bp->dev,
2156 "Error recovery info: error recovery[0]\n");
7e914027 2157 break;
f4d95c3c 2158 }
7e914027
MC
2159 fw_health->tmr_multiplier =
2160 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2161 bp->current_interval * 10);
2162 fw_health->tmr_counter = fw_health->tmr_multiplier;
2163 fw_health->last_fw_heartbeat =
2164 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2165 fw_health->last_fw_reset_cnt =
2166 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
f4d95c3c
MC
2167 netif_info(bp, drv, bp->dev,
2168 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2169 fw_health->master, fw_health->last_fw_reset_cnt,
2170 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
7e914027
MC
2171 goto async_event_process_exit;
2172 }
a44daa8f 2173 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
871127e6
MC
2174 netif_notice(bp, hw, bp->dev,
2175 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2176 data1, data2);
a44daa8f 2177 goto async_event_process_exit;
8d4bd96b 2178 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
8d4bd96b
MC
2179 struct bnxt_rx_ring_info *rxr;
2180 u16 grp_idx;
2181
2182 if (bp->flags & BNXT_FLAG_CHIP_P5)
2183 goto async_event_process_exit;
2184
2185 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2186 BNXT_EVENT_RING_TYPE(data2), data1);
2187 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2188 goto async_event_process_exit;
2189
2190 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2191 if (grp_idx == INVALID_HW_RING_ID) {
2192 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2193 data1);
2194 goto async_event_process_exit;
2195 }
2196 rxr = bp->bnapi[grp_idx]->rx_ring;
2197 bnxt_sched_reset(bp, rxr);
2198 goto async_event_process_exit;
2199 }
df97b34d
MC
2200 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2201 struct bnxt_fw_health *fw_health = bp->fw_health;
2202
2203 netif_notice(bp, hw, bp->dev,
2204 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2205 data1, data2);
2206 if (fw_health) {
2207 fw_health->echo_req_data1 = data1;
2208 fw_health->echo_req_data2 = data2;
2209 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2210 break;
2211 }
2212 goto async_event_process_exit;
2213 }
c0c050c5 2214 default:
19241368 2215 goto async_event_process_exit;
c0c050c5 2216 }
c213eae8 2217 bnxt_queue_sp_work(bp);
19241368 2218async_event_process_exit:
a588e458 2219 bnxt_ulp_async_events(bp, cmpl);
c0c050c5
MC
2220 return 0;
2221}
2222
2223static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2224{
2225 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2226 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2227 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2228 (struct hwrm_fwd_req_cmpl *)txcmp;
2229
2230 switch (cmpl_type) {
2231 case CMPL_BASE_TYPE_HWRM_DONE:
2232 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2233 if (seq_id == bp->hwrm_intr_seq_id)
fc718bb2 2234 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
c0c050c5
MC
2235 else
2236 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2237 break;
2238
2239 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2240 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2241
2242 if ((vf_id < bp->pf.first_vf_id) ||
2243 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2244 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2245 vf_id);
2246 return -EINVAL;
2247 }
2248
2249 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2250 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
c213eae8 2251 bnxt_queue_sp_work(bp);
c0c050c5
MC
2252 break;
2253
2254 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2255 bnxt_async_event_process(bp,
2256 (struct hwrm_async_event_cmpl *)txcmp);
cc9fd180 2257 break;
c0c050c5
MC
2258
2259 default:
2260 break;
2261 }
2262
2263 return 0;
2264}
2265
2266static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2267{
2268 struct bnxt_napi *bnapi = dev_instance;
2269 struct bnxt *bp = bnapi->bp;
2270 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2271 u32 cons = RING_CMP(cpr->cp_raw_cons);
2272
6a8788f2 2273 cpr->event_ctr++;
c0c050c5
MC
2274 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2275 napi_schedule(&bnapi->napi);
2276 return IRQ_HANDLED;
2277}
2278
2279static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2280{
2281 u32 raw_cons = cpr->cp_raw_cons;
2282 u16 cons = RING_CMP(raw_cons);
2283 struct tx_cmp *txcmp;
2284
2285 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2286
2287 return TX_CMP_VALID(txcmp, raw_cons);
2288}
2289
c0c050c5
MC
2290static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2291{
2292 struct bnxt_napi *bnapi = dev_instance;
2293 struct bnxt *bp = bnapi->bp;
2294 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2295 u32 cons = RING_CMP(cpr->cp_raw_cons);
2296 u32 int_status;
2297
2298 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2299
2300 if (!bnxt_has_work(bp, cpr)) {
11809490 2301 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
c0c050c5
MC
2302 /* return if erroneous interrupt */
2303 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2304 return IRQ_NONE;
2305 }
2306
2307 /* disable ring IRQ */
697197e5 2308 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
c0c050c5
MC
2309
2310 /* Return here if interrupt is shared and is disabled. */
2311 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2312 return IRQ_HANDLED;
2313
2314 napi_schedule(&bnapi->napi);
2315 return IRQ_HANDLED;
2316}
2317
3675b92f
MC
2318static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2319 int budget)
c0c050c5 2320{
e44758b7 2321 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5
MC
2322 u32 raw_cons = cpr->cp_raw_cons;
2323 u32 cons;
2324 int tx_pkts = 0;
2325 int rx_pkts = 0;
4e5dbbda 2326 u8 event = 0;
c0c050c5
MC
2327 struct tx_cmp *txcmp;
2328
0fcec985 2329 cpr->has_more_work = 0;
340ac85e 2330 cpr->had_work_done = 1;
c0c050c5
MC
2331 while (1) {
2332 int rc;
2333
2334 cons = RING_CMP(raw_cons);
2335 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2336
2337 if (!TX_CMP_VALID(txcmp, raw_cons))
2338 break;
2339
67a95e20
MC
2340 /* The valid test of the entry must be done first before
2341 * reading any further.
2342 */
b67daab0 2343 dma_rmb();
c0c050c5
MC
2344 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2345 tx_pkts++;
2346 /* return full budget so NAPI will complete. */
73f21c65 2347 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
c0c050c5 2348 rx_pkts = budget;
73f21c65 2349 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985
MC
2350 if (budget)
2351 cpr->has_more_work = 1;
73f21c65
MC
2352 break;
2353 }
c0c050c5 2354 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2270bc5d 2355 if (likely(budget))
e44758b7 2356 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2270bc5d 2357 else
e44758b7 2358 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2270bc5d 2359 &event);
c0c050c5
MC
2360 if (likely(rc >= 0))
2361 rx_pkts += rc;
903649e7
MC
2362 /* Increment rx_pkts when rc is -ENOMEM to count towards
2363 * the NAPI budget. Otherwise, we may potentially loop
2364 * here forever if we consistently cannot allocate
2365 * buffers.
2366 */
2edbdb31 2367 else if (rc == -ENOMEM && budget)
903649e7 2368 rx_pkts++;
c0c050c5
MC
2369 else if (rc == -EBUSY) /* partial completion */
2370 break;
c0c050c5
MC
2371 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2372 CMPL_BASE_TYPE_HWRM_DONE) ||
2373 (TX_CMP_TYPE(txcmp) ==
2374 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2375 (TX_CMP_TYPE(txcmp) ==
2376 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2377 bnxt_hwrm_handler(bp, txcmp);
2378 }
2379 raw_cons = NEXT_RAW_CMP(raw_cons);
2380
0fcec985
MC
2381 if (rx_pkts && rx_pkts == budget) {
2382 cpr->has_more_work = 1;
c0c050c5 2383 break;
0fcec985 2384 }
c0c050c5
MC
2385 }
2386
f18c2b77
AG
2387 if (event & BNXT_REDIRECT_EVENT)
2388 xdp_do_flush_map();
2389
38413406
MC
2390 if (event & BNXT_TX_EVENT) {
2391 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
38413406
MC
2392 u16 prod = txr->tx_prod;
2393
2394 /* Sync BD data before updating doorbell */
2395 wmb();
2396
697197e5 2397 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
38413406
MC
2398 }
2399
c0c050c5 2400 cpr->cp_raw_cons = raw_cons;
3675b92f
MC
2401 bnapi->tx_pkts += tx_pkts;
2402 bnapi->events |= event;
2403 return rx_pkts;
2404}
c0c050c5 2405
3675b92f
MC
2406static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2407{
2408 if (bnapi->tx_pkts) {
2409 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2410 bnapi->tx_pkts = 0;
2411 }
c0c050c5 2412
8fbf58e1 2413 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
b6ab4b01 2414 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 2415
3675b92f 2416 if (bnapi->events & BNXT_AGG_EVENT)
697197e5 2417 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 2418 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
c0c050c5 2419 }
3675b92f
MC
2420 bnapi->events = 0;
2421}
2422
2423static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2424 int budget)
2425{
2426 struct bnxt_napi *bnapi = cpr->bnapi;
2427 int rx_pkts;
2428
2429 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2430
2431 /* ACK completion ring before freeing tx ring and producing new
2432 * buffers in rx/agg rings to prevent overflowing the completion
2433 * ring.
2434 */
2435 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2436
2437 __bnxt_poll_work_done(bp, bnapi);
c0c050c5
MC
2438 return rx_pkts;
2439}
2440
10bbdaf5
PS
2441static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2442{
2443 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2444 struct bnxt *bp = bnapi->bp;
2445 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2446 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2447 struct tx_cmp *txcmp;
2448 struct rx_cmp_ext *rxcmp1;
2449 u32 cp_cons, tmp_raw_cons;
2450 u32 raw_cons = cpr->cp_raw_cons;
2451 u32 rx_pkts = 0;
4e5dbbda 2452 u8 event = 0;
10bbdaf5
PS
2453
2454 while (1) {
2455 int rc;
2456
2457 cp_cons = RING_CMP(raw_cons);
2458 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2459
2460 if (!TX_CMP_VALID(txcmp, raw_cons))
2461 break;
2462
2463 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2464 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2465 cp_cons = RING_CMP(tmp_raw_cons);
2466 rxcmp1 = (struct rx_cmp_ext *)
2467 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2468
2469 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2470 break;
2471
2472 /* force an error to recycle the buffer */
2473 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2474 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2475
e44758b7 2476 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2edbdb31 2477 if (likely(rc == -EIO) && budget)
10bbdaf5
PS
2478 rx_pkts++;
2479 else if (rc == -EBUSY) /* partial completion */
2480 break;
2481 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2482 CMPL_BASE_TYPE_HWRM_DONE)) {
2483 bnxt_hwrm_handler(bp, txcmp);
2484 } else {
2485 netdev_err(bp->dev,
2486 "Invalid completion received on special ring\n");
2487 }
2488 raw_cons = NEXT_RAW_CMP(raw_cons);
2489
2490 if (rx_pkts == budget)
2491 break;
2492 }
2493
2494 cpr->cp_raw_cons = raw_cons;
697197e5
MC
2495 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2496 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10bbdaf5 2497
434c975a 2498 if (event & BNXT_AGG_EVENT)
697197e5 2499 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10bbdaf5
PS
2500
2501 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
6ad20165 2502 napi_complete_done(napi, rx_pkts);
697197e5 2503 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
10bbdaf5
PS
2504 }
2505 return rx_pkts;
2506}
2507
c0c050c5
MC
2508static int bnxt_poll(struct napi_struct *napi, int budget)
2509{
2510 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2511 struct bnxt *bp = bnapi->bp;
2512 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2513 int work_done = 0;
2514
0da65f49
MC
2515 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2516 napi_complete(napi);
2517 return 0;
2518 }
c0c050c5 2519 while (1) {
e44758b7 2520 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
c0c050c5 2521
73f21c65
MC
2522 if (work_done >= budget) {
2523 if (!budget)
697197e5 2524 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5 2525 break;
73f21c65 2526 }
c0c050c5
MC
2527
2528 if (!bnxt_has_work(bp, cpr)) {
e7b95691 2529 if (napi_complete_done(napi, work_done))
697197e5 2530 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5
MC
2531 break;
2532 }
2533 }
6a8788f2 2534 if (bp->flags & BNXT_FLAG_DIM) {
f06d0ca4 2535 struct dim_sample dim_sample = {};
6a8788f2 2536
8960b389
TG
2537 dim_update_sample(cpr->event_ctr,
2538 cpr->rx_packets,
2539 cpr->rx_bytes,
2540 &dim_sample);
6a8788f2
AG
2541 net_dim(&cpr->dim, dim_sample);
2542 }
c0c050c5
MC
2543 return work_done;
2544}
2545
0fcec985
MC
2546static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2547{
2548 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2549 int i, work_done = 0;
2550
2551 for (i = 0; i < 2; i++) {
2552 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2553
2554 if (cpr2) {
2555 work_done += __bnxt_poll_work(bp, cpr2,
2556 budget - work_done);
2557 cpr->has_more_work |= cpr2->has_more_work;
2558 }
2559 }
2560 return work_done;
2561}
2562
2563static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
340ac85e 2564 u64 dbr_type)
0fcec985
MC
2565{
2566 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2567 int i;
2568
2569 for (i = 0; i < 2; i++) {
2570 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2571 struct bnxt_db_info *db;
2572
340ac85e 2573 if (cpr2 && cpr2->had_work_done) {
0fcec985
MC
2574 db = &cpr2->cp_db;
2575 writeq(db->db_key64 | dbr_type |
2576 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2577 cpr2->had_work_done = 0;
2578 }
2579 }
2580 __bnxt_poll_work_done(bp, bnapi);
2581}
2582
2583static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2584{
2585 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2586 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2587 u32 raw_cons = cpr->cp_raw_cons;
2588 struct bnxt *bp = bnapi->bp;
2589 struct nqe_cn *nqcmp;
2590 int work_done = 0;
2591 u32 cons;
2592
0da65f49
MC
2593 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2594 napi_complete(napi);
2595 return 0;
2596 }
0fcec985
MC
2597 if (cpr->has_more_work) {
2598 cpr->has_more_work = 0;
2599 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
0fcec985
MC
2600 }
2601 while (1) {
2602 cons = RING_CMP(raw_cons);
2603 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2604
2605 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
54a9062f
MC
2606 if (cpr->has_more_work)
2607 break;
2608
340ac85e 2609 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
0fcec985
MC
2610 cpr->cp_raw_cons = raw_cons;
2611 if (napi_complete_done(napi, work_done))
2612 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2613 cpr->cp_raw_cons);
2614 return work_done;
2615 }
2616
2617 /* The valid test of the entry must be done first before
2618 * reading any further.
2619 */
2620 dma_rmb();
2621
2622 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2623 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2624 struct bnxt_cp_ring_info *cpr2;
2625
2626 cpr2 = cpr->cp_ring_arr[idx];
2627 work_done += __bnxt_poll_work(bp, cpr2,
2628 budget - work_done);
54a9062f 2629 cpr->has_more_work |= cpr2->has_more_work;
0fcec985
MC
2630 } else {
2631 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2632 }
2633 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985 2634 }
340ac85e 2635 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
389a877a
MC
2636 if (raw_cons != cpr->cp_raw_cons) {
2637 cpr->cp_raw_cons = raw_cons;
2638 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2639 }
0fcec985
MC
2640 return work_done;
2641}
2642
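/* Summary of the P5 polling scheme above: the MSI-X vector services a
 * notification queue (NQ); each NQ_CN_TYPE_CQ_NOTIFICATION entry carries the
 * index (cq_handle_low) of one of the two completion sub-rings in
 * cpr->cp_ring_arr[] (RX and TX handles), which is then polled with
 * __bnxt_poll_work().  DBR_TYPE_CQ acknowledges a sub-ring's consumer index,
 * while DBR_TYPE_CQ_ARMALL is written on the napi_complete_done() path,
 * presumably to re-arm the ring as well.
 */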
c0c050c5
MC
2643static void bnxt_free_tx_skbs(struct bnxt *bp)
2644{
2645 int i, max_idx;
2646 struct pci_dev *pdev = bp->pdev;
2647
b6ab4b01 2648 if (!bp->tx_ring)
c0c050c5
MC
2649 return;
2650
2651 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2652 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2653 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2654 int j;
2655
c0c050c5
MC
2656 for (j = 0; j < max_idx;) {
2657 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
f18c2b77 2658 struct sk_buff *skb;
c0c050c5
MC
2659 int k, last;
2660
f18c2b77
AG
2661 if (i < bp->tx_nr_rings_xdp &&
2662 tx_buf->action == XDP_REDIRECT) {
2663 dma_unmap_single(&pdev->dev,
2664 dma_unmap_addr(tx_buf, mapping),
2665 dma_unmap_len(tx_buf, len),
2666 PCI_DMA_TODEVICE);
2667 xdp_return_frame(tx_buf->xdpf);
2668 tx_buf->action = 0;
2669 tx_buf->xdpf = NULL;
2670 j++;
2671 continue;
2672 }
2673
2674 skb = tx_buf->skb;
c0c050c5
MC
2675 if (!skb) {
2676 j++;
2677 continue;
2678 }
2679
2680 tx_buf->skb = NULL;
2681
2682 if (tx_buf->is_push) {
2683 dev_kfree_skb(skb);
2684 j += 2;
2685 continue;
2686 }
2687
2688 dma_unmap_single(&pdev->dev,
2689 dma_unmap_addr(tx_buf, mapping),
2690 skb_headlen(skb),
2691 PCI_DMA_TODEVICE);
2692
2693 last = tx_buf->nr_frags;
2694 j += 2;
d612a579
MC
2695 for (k = 0; k < last; k++, j++) {
2696 int ring_idx = j & bp->tx_ring_mask;
c0c050c5
MC
2697 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2698
d612a579 2699 tx_buf = &txr->tx_buf_ring[ring_idx];
c0c050c5
MC
2700 dma_unmap_page(
2701 &pdev->dev,
2702 dma_unmap_addr(tx_buf, mapping),
2703 skb_frag_size(frag), PCI_DMA_TODEVICE);
2704 }
2705 dev_kfree_skb(skb);
2706 }
2707 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2708 }
2709}
2710
975bc99a 2711static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
c0c050c5 2712{
975bc99a 2713 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
c0c050c5 2714 struct pci_dev *pdev = bp->pdev;
975bc99a
MC
2715 struct bnxt_tpa_idx_map *map;
2716 int i, max_idx, max_agg_idx;
c0c050c5
MC
2717
2718 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2719 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
975bc99a
MC
2720 if (!rxr->rx_tpa)
2721 goto skip_rx_tpa_free;
c0c050c5 2722
975bc99a
MC
2723 for (i = 0; i < bp->max_tpa; i++) {
2724 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2725 u8 *data = tpa_info->data;
c0c050c5 2726
975bc99a
MC
2727 if (!data)
2728 continue;
c0c050c5 2729
975bc99a
MC
2730 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2731 bp->rx_buf_use_size, bp->rx_dir,
2732 DMA_ATTR_WEAK_ORDERING);
c0c050c5 2733
975bc99a 2734 tpa_info->data = NULL;
c0c050c5 2735
975bc99a
MC
2736 kfree(data);
2737 }
c0c050c5 2738
975bc99a
MC
2739skip_rx_tpa_free:
2740 for (i = 0; i < max_idx; i++) {
2741 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2742 dma_addr_t mapping = rx_buf->mapping;
2743 void *data = rx_buf->data;
c0c050c5 2744
975bc99a
MC
2745 if (!data)
2746 continue;
c0c050c5 2747
975bc99a
MC
2748 rx_buf->data = NULL;
2749 if (BNXT_RX_PAGE_MODE(bp)) {
2750 mapping -= bp->rx_dma_offset;
2751 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2752 bp->rx_dir,
2753 DMA_ATTR_WEAK_ORDERING);
2754 page_pool_recycle_direct(rxr->page_pool, data);
2755 } else {
2756 dma_unmap_single_attrs(&pdev->dev, mapping,
2757 bp->rx_buf_use_size, bp->rx_dir,
2758 DMA_ATTR_WEAK_ORDERING);
2759 kfree(data);
c0c050c5 2760 }
975bc99a
MC
2761 }
2762 for (i = 0; i < max_agg_idx; i++) {
2763 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2764 struct page *page = rx_agg_buf->page;
c0c050c5 2765
975bc99a
MC
2766 if (!page)
2767 continue;
c0c050c5 2768
975bc99a
MC
2769 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2770 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
2771 DMA_ATTR_WEAK_ORDERING);
c0c050c5 2772
975bc99a
MC
2773 rx_agg_buf->page = NULL;
2774 __clear_bit(i, rxr->rx_agg_bmap);
c0c050c5 2775
975bc99a
MC
2776 __free_page(page);
2777 }
2778 if (rxr->rx_page) {
2779 __free_page(rxr->rx_page);
2780 rxr->rx_page = NULL;
c0c050c5 2781 }
975bc99a
MC
2782 map = rxr->rx_tpa_idx_map;
2783 if (map)
2784 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2785}
2786
2787static void bnxt_free_rx_skbs(struct bnxt *bp)
2788{
2789 int i;
2790
2791 if (!bp->rx_ring)
2792 return;
2793
2794 for (i = 0; i < bp->rx_nr_rings; i++)
2795 bnxt_free_one_rx_ring_skbs(bp, i);
c0c050c5
MC
2796}
2797
2798static void bnxt_free_skbs(struct bnxt *bp)
2799{
2800 bnxt_free_tx_skbs(bp);
2801 bnxt_free_rx_skbs(bp);
2802}
2803
41435c39
MC
2804static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2805{
2806 u8 init_val = mem_init->init_val;
2807 u16 offset = mem_init->offset;
2808 u8 *p2 = p;
2809 int i;
2810
2811 if (!init_val)
2812 return;
2813 if (offset == BNXT_MEM_INVALID_OFFSET) {
2814 memset(p, init_val, len);
2815 return;
2816 }
2817 for (i = 0; i < len; i += mem_init->size)
2818 *(p2 + i + offset) = init_val;
2819}
2820
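/* Example: with mem_init->size == 64 and mem_init->offset == 8, the loop above
 * writes init_val into byte 8 of every 64-byte unit in the buffer; when offset
 * is BNXT_MEM_INVALID_OFFSET the whole buffer is memset() to init_val instead.
 * (The values here are illustrative only.)
 */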
6fe19886 2821static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5
MC
2822{
2823 struct pci_dev *pdev = bp->pdev;
2824 int i;
2825
6fe19886
MC
2826 for (i = 0; i < rmem->nr_pages; i++) {
2827 if (!rmem->pg_arr[i])
c0c050c5
MC
2828 continue;
2829
6fe19886
MC
2830 dma_free_coherent(&pdev->dev, rmem->page_size,
2831 rmem->pg_arr[i], rmem->dma_arr[i]);
c0c050c5 2832
6fe19886 2833 rmem->pg_arr[i] = NULL;
c0c050c5 2834 }
6fe19886 2835 if (rmem->pg_tbl) {
4f49b2b8
MC
2836 size_t pg_tbl_size = rmem->nr_pages * 8;
2837
2838 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2839 pg_tbl_size = rmem->page_size;
2840 dma_free_coherent(&pdev->dev, pg_tbl_size,
6fe19886
MC
2841 rmem->pg_tbl, rmem->pg_tbl_map);
2842 rmem->pg_tbl = NULL;
c0c050c5 2843 }
6fe19886
MC
2844 if (rmem->vmem_size && *rmem->vmem) {
2845 vfree(*rmem->vmem);
2846 *rmem->vmem = NULL;
c0c050c5
MC
2847 }
2848}
2849
6fe19886 2850static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5 2851{
c0c050c5 2852 struct pci_dev *pdev = bp->pdev;
66cca20a 2853 u64 valid_bit = 0;
6fe19886 2854 int i;
c0c050c5 2855
66cca20a
MC
2856 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2857 valid_bit = PTU_PTE_VALID;
4f49b2b8
MC
2858 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2859 size_t pg_tbl_size = rmem->nr_pages * 8;
2860
2861 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2862 pg_tbl_size = rmem->page_size;
2863 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
6fe19886 2864 &rmem->pg_tbl_map,
c0c050c5 2865 GFP_KERNEL);
6fe19886 2866 if (!rmem->pg_tbl)
c0c050c5
MC
2867 return -ENOMEM;
2868 }
2869
6fe19886 2870 for (i = 0; i < rmem->nr_pages; i++) {
66cca20a
MC
2871 u64 extra_bits = valid_bit;
2872
6fe19886
MC
2873 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2874 rmem->page_size,
2875 &rmem->dma_arr[i],
c0c050c5 2876 GFP_KERNEL);
6fe19886 2877 if (!rmem->pg_arr[i])
c0c050c5
MC
2878 return -ENOMEM;
2879
41435c39
MC
2880 if (rmem->mem_init)
2881 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2882 rmem->page_size);
4f49b2b8 2883 if (rmem->nr_pages > 1 || rmem->depth > 0) {
66cca20a
MC
2884 if (i == rmem->nr_pages - 2 &&
2885 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2886 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2887 else if (i == rmem->nr_pages - 1 &&
2888 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2889 extra_bits |= PTU_PTE_LAST;
2890 rmem->pg_tbl[i] =
2891 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2892 }
c0c050c5
MC
2893 }
2894
6fe19886
MC
2895 if (rmem->vmem_size) {
2896 *rmem->vmem = vzalloc(rmem->vmem_size);
2897 if (!(*rmem->vmem))
c0c050c5
MC
2898 return -ENOMEM;
2899 }
2900 return 0;
2901}
2902
4a228a3a
MC
2903static void bnxt_free_tpa_info(struct bnxt *bp)
2904{
2905 int i;
2906
2907 for (i = 0; i < bp->rx_nr_rings; i++) {
2908 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2909
ec4d8e7c
MC
2910 kfree(rxr->rx_tpa_idx_map);
2911 rxr->rx_tpa_idx_map = NULL;
79632e9b
MC
2912 if (rxr->rx_tpa) {
2913 kfree(rxr->rx_tpa[0].agg_arr);
2914 rxr->rx_tpa[0].agg_arr = NULL;
2915 }
4a228a3a
MC
2916 kfree(rxr->rx_tpa);
2917 rxr->rx_tpa = NULL;
2918 }
2919}
2920
2921static int bnxt_alloc_tpa_info(struct bnxt *bp)
2922{
79632e9b
MC
2923 int i, j, total_aggs = 0;
2924
2925 bp->max_tpa = MAX_TPA;
2926 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2927 if (!bp->max_tpa_v2)
2928 return 0;
2929 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2930 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2931 }
4a228a3a
MC
2932
2933 for (i = 0; i < bp->rx_nr_rings; i++) {
2934 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
79632e9b 2935 struct rx_agg_cmp *agg;
4a228a3a 2936
79632e9b 2937 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
4a228a3a
MC
2938 GFP_KERNEL);
2939 if (!rxr->rx_tpa)
2940 return -ENOMEM;
79632e9b
MC
2941
2942 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2943 continue;
2944 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2945 rxr->rx_tpa[0].agg_arr = agg;
2946 if (!agg)
2947 return -ENOMEM;
2948 for (j = 1; j < bp->max_tpa; j++)
2949 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
ec4d8e7c
MC
2950 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2951 GFP_KERNEL);
2952 if (!rxr->rx_tpa_idx_map)
2953 return -ENOMEM;
4a228a3a
MC
2954 }
2955 return 0;
2956}
2957
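/* Layout note: on P5 chips a single array of bp->max_tpa * MAX_SKB_FRAGS
 * rx_agg_cmp entries is allocated per ring and carved up so that
 * rx_tpa[j].agg_arr points at its own MAX_SKB_FRAGS-sized slice; freeing only
 * rx_tpa[0].agg_arr in bnxt_free_tpa_info() therefore releases all of them.
 */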
c0c050c5
MC
2958static void bnxt_free_rx_rings(struct bnxt *bp)
2959{
2960 int i;
2961
b6ab4b01 2962 if (!bp->rx_ring)
c0c050c5
MC
2963 return;
2964
4a228a3a 2965 bnxt_free_tpa_info(bp);
c0c050c5 2966 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2967 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2968 struct bnxt_ring_struct *ring;
2969
c6d30e83
MC
2970 if (rxr->xdp_prog)
2971 bpf_prog_put(rxr->xdp_prog);
2972
96a8604f
JDB
2973 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2974 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2975
12479f62 2976 page_pool_destroy(rxr->page_pool);
322b87ca
AG
2977 rxr->page_pool = NULL;
2978
c0c050c5
MC
2979 kfree(rxr->rx_agg_bmap);
2980 rxr->rx_agg_bmap = NULL;
2981
2982 ring = &rxr->rx_ring_struct;
6fe19886 2983 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2984
2985 ring = &rxr->rx_agg_ring_struct;
6fe19886 2986 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2987 }
2988}
2989
322b87ca
AG
2990static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2991 struct bnxt_rx_ring_info *rxr)
2992{
2993 struct page_pool_params pp = { 0 };
2994
2995 pp.pool_size = bp->rx_ring_size;
2996 pp.nid = dev_to_node(&bp->pdev->dev);
2997 pp.dev = &bp->pdev->dev;
2998 pp.dma_dir = DMA_BIDIRECTIONAL;
2999
3000 rxr->page_pool = page_pool_create(&pp);
3001 if (IS_ERR(rxr->page_pool)) {
3002 int err = PTR_ERR(rxr->page_pool);
3003
3004 rxr->page_pool = NULL;
3005 return err;
3006 }
3007 return 0;
3008}
3009
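/* The pool is sized to the RX ring and bound to the device's NUMA node; pages
 * handed out from it are returned either through the XDP/page_pool memory
 * model registered below or explicitly via page_pool_recycle_direct() in
 * bnxt_free_one_rx_ring_skbs().
 */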
c0c050c5
MC
3010static int bnxt_alloc_rx_rings(struct bnxt *bp)
3011{
4a228a3a 3012 int i, rc = 0, agg_rings = 0;
c0c050c5 3013
b6ab4b01
MC
3014 if (!bp->rx_ring)
3015 return -ENOMEM;
3016
c0c050c5
MC
3017 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3018 agg_rings = 1;
3019
c0c050c5 3020 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3021 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
3022 struct bnxt_ring_struct *ring;
3023
c0c050c5
MC
3024 ring = &rxr->rx_ring_struct;
3025
322b87ca
AG
3026 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3027 if (rc)
3028 return rc;
3029
b02e5a0e 3030 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
12479f62 3031 if (rc < 0)
96a8604f
JDB
3032 return rc;
3033
f18c2b77 3034 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
322b87ca
AG
3035 MEM_TYPE_PAGE_POOL,
3036 rxr->page_pool);
f18c2b77
AG
3037 if (rc) {
3038 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3039 return rc;
3040 }
3041
6fe19886 3042 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3043 if (rc)
3044 return rc;
3045
2c61d211 3046 ring->grp_idx = i;
c0c050c5
MC
3047 if (agg_rings) {
3048 u16 mem_size;
3049
3050 ring = &rxr->rx_agg_ring_struct;
6fe19886 3051 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3052 if (rc)
3053 return rc;
3054
9899bb59 3055 ring->grp_idx = i;
c0c050c5
MC
3056 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3057 mem_size = rxr->rx_agg_bmap_size / 8;
3058 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3059 if (!rxr->rx_agg_bmap)
3060 return -ENOMEM;
c0c050c5
MC
3061 }
3062 }
4a228a3a
MC
3063 if (bp->flags & BNXT_FLAG_TPA)
3064 rc = bnxt_alloc_tpa_info(bp);
3065 return rc;
c0c050c5
MC
3066}
3067
3068static void bnxt_free_tx_rings(struct bnxt *bp)
3069{
3070 int i;
3071 struct pci_dev *pdev = bp->pdev;
3072
b6ab4b01 3073 if (!bp->tx_ring)
c0c050c5
MC
3074 return;
3075
3076 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3077 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3078 struct bnxt_ring_struct *ring;
3079
c0c050c5
MC
3080 if (txr->tx_push) {
3081 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3082 txr->tx_push, txr->tx_push_mapping);
3083 txr->tx_push = NULL;
3084 }
3085
3086 ring = &txr->tx_ring_struct;
3087
6fe19886 3088 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3089 }
3090}
3091
3092static int bnxt_alloc_tx_rings(struct bnxt *bp)
3093{
3094 int i, j, rc;
3095 struct pci_dev *pdev = bp->pdev;
3096
3097 bp->tx_push_size = 0;
3098 if (bp->tx_push_thresh) {
3099 int push_size;
3100
3101 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3102 bp->tx_push_thresh);
3103
4419dbe6 3104 if (push_size > 256) {
c0c050c5
MC
3105 push_size = 0;
3106 bp->tx_push_thresh = 0;
3107 }
3108
3109 bp->tx_push_size = push_size;
3110 }
3111
3112 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3113 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5 3114 struct bnxt_ring_struct *ring;
2e8ef77e 3115 u8 qidx;
c0c050c5 3116
c0c050c5
MC
3117 ring = &txr->tx_ring_struct;
3118
6fe19886 3119 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3120 if (rc)
3121 return rc;
3122
9899bb59 3123 ring->grp_idx = txr->bnapi->index;
c0c050c5 3124 if (bp->tx_push_size) {
c0c050c5
MC
3125 dma_addr_t mapping;
3126
3127 /* One pre-allocated DMA buffer to back up the
3128 * TX push operation
3129 */
3130 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3131 bp->tx_push_size,
3132 &txr->tx_push_mapping,
3133 GFP_KERNEL);
3134
3135 if (!txr->tx_push)
3136 return -ENOMEM;
3137
c0c050c5
MC
3138 mapping = txr->tx_push_mapping +
3139 sizeof(struct tx_push_bd);
4419dbe6 3140 txr->data_mapping = cpu_to_le64(mapping);
c0c050c5 3141 }
2e8ef77e
MC
3142 qidx = bp->tc_to_qidx[j];
3143 ring->queue_id = bp->q_info[qidx].queue_id;
5f449249
MC
3144 if (i < bp->tx_nr_rings_xdp)
3145 continue;
c0c050c5
MC
3146 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3147 j++;
3148 }
3149 return 0;
3150}
3151
3152static void bnxt_free_cp_rings(struct bnxt *bp)
3153{
3154 int i;
3155
3156 if (!bp->bnapi)
3157 return;
3158
3159 for (i = 0; i < bp->cp_nr_rings; i++) {
3160 struct bnxt_napi *bnapi = bp->bnapi[i];
3161 struct bnxt_cp_ring_info *cpr;
3162 struct bnxt_ring_struct *ring;
50e3ab78 3163 int j;
c0c050c5
MC
3164
3165 if (!bnapi)
3166 continue;
3167
3168 cpr = &bnapi->cp_ring;
3169 ring = &cpr->cp_ring_struct;
3170
6fe19886 3171 bnxt_free_ring(bp, &ring->ring_mem);
50e3ab78
MC
3172
3173 for (j = 0; j < 2; j++) {
3174 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3175
3176 if (cpr2) {
3177 ring = &cpr2->cp_ring_struct;
3178 bnxt_free_ring(bp, &ring->ring_mem);
3179 kfree(cpr2);
3180 cpr->cp_ring_arr[j] = NULL;
3181 }
3182 }
c0c050c5
MC
3183 }
3184}
3185
50e3ab78
MC
3186static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3187{
3188 struct bnxt_ring_mem_info *rmem;
3189 struct bnxt_ring_struct *ring;
3190 struct bnxt_cp_ring_info *cpr;
3191 int rc;
3192
3193 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3194 if (!cpr)
3195 return NULL;
3196
3197 ring = &cpr->cp_ring_struct;
3198 rmem = &ring->ring_mem;
3199 rmem->nr_pages = bp->cp_nr_pages;
3200 rmem->page_size = HW_CMPD_RING_SIZE;
3201 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3202 rmem->dma_arr = cpr->cp_desc_mapping;
3203 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3204 rc = bnxt_alloc_ring(bp, rmem);
3205 if (rc) {
3206 bnxt_free_ring(bp, rmem);
3207 kfree(cpr);
3208 cpr = NULL;
3209 }
3210 return cpr;
3211}
3212
c0c050c5
MC
3213static int bnxt_alloc_cp_rings(struct bnxt *bp)
3214{
50e3ab78 3215 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
e5811b8c 3216 int i, rc, ulp_base_vec, ulp_msix;
c0c050c5 3217
e5811b8c
MC
3218 ulp_msix = bnxt_get_ulp_msix_num(bp);
3219 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
c0c050c5
MC
3220 for (i = 0; i < bp->cp_nr_rings; i++) {
3221 struct bnxt_napi *bnapi = bp->bnapi[i];
3222 struct bnxt_cp_ring_info *cpr;
3223 struct bnxt_ring_struct *ring;
3224
3225 if (!bnapi)
3226 continue;
3227
3228 cpr = &bnapi->cp_ring;
50e3ab78 3229 cpr->bnapi = bnapi;
c0c050c5
MC
3230 ring = &cpr->cp_ring_struct;
3231
6fe19886 3232 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3233 if (rc)
3234 return rc;
e5811b8c
MC
3235
3236 if (ulp_msix && i >= ulp_base_vec)
3237 ring->map_idx = i + ulp_msix;
3238 else
3239 ring->map_idx = i;
50e3ab78
MC
3240
3241 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3242 continue;
3243
3244 if (i < bp->rx_nr_rings) {
3245 struct bnxt_cp_ring_info *cpr2 =
3246 bnxt_alloc_cp_sub_ring(bp);
3247
3248 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3249 if (!cpr2)
3250 return -ENOMEM;
3251 cpr2->bnapi = bnapi;
3252 }
3253 if ((sh && i < bp->tx_nr_rings) ||
3254 (!sh && i >= bp->rx_nr_rings)) {
3255 struct bnxt_cp_ring_info *cpr2 =
3256 bnxt_alloc_cp_sub_ring(bp);
3257
3258 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3259 if (!cpr2)
3260 return -ENOMEM;
3261 cpr2->bnapi = bnapi;
3262 }
c0c050c5
MC
3263 }
3264 return 0;
3265}
3266
3267static void bnxt_init_ring_struct(struct bnxt *bp)
3268{
3269 int i;
3270
3271 for (i = 0; i < bp->cp_nr_rings; i++) {
3272 struct bnxt_napi *bnapi = bp->bnapi[i];
6fe19886 3273 struct bnxt_ring_mem_info *rmem;
c0c050c5
MC
3274 struct bnxt_cp_ring_info *cpr;
3275 struct bnxt_rx_ring_info *rxr;
3276 struct bnxt_tx_ring_info *txr;
3277 struct bnxt_ring_struct *ring;
3278
3279 if (!bnapi)
3280 continue;
3281
3282 cpr = &bnapi->cp_ring;
3283 ring = &cpr->cp_ring_struct;
6fe19886
MC
3284 rmem = &ring->ring_mem;
3285 rmem->nr_pages = bp->cp_nr_pages;
3286 rmem->page_size = HW_CMPD_RING_SIZE;
3287 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3288 rmem->dma_arr = cpr->cp_desc_mapping;
3289 rmem->vmem_size = 0;
c0c050c5 3290
b6ab4b01 3291 rxr = bnapi->rx_ring;
3b2b7d9d
MC
3292 if (!rxr)
3293 goto skip_rx;
3294
c0c050c5 3295 ring = &rxr->rx_ring_struct;
6fe19886
MC
3296 rmem = &ring->ring_mem;
3297 rmem->nr_pages = bp->rx_nr_pages;
3298 rmem->page_size = HW_RXBD_RING_SIZE;
3299 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3300 rmem->dma_arr = rxr->rx_desc_mapping;
3301 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3302 rmem->vmem = (void **)&rxr->rx_buf_ring;
c0c050c5
MC
3303
3304 ring = &rxr->rx_agg_ring_struct;
6fe19886
MC
3305 rmem = &ring->ring_mem;
3306 rmem->nr_pages = bp->rx_agg_nr_pages;
3307 rmem->page_size = HW_RXBD_RING_SIZE;
3308 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3309 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3310 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3311 rmem->vmem = (void **)&rxr->rx_agg_ring;
c0c050c5 3312
3b2b7d9d 3313skip_rx:
b6ab4b01 3314 txr = bnapi->tx_ring;
3b2b7d9d
MC
3315 if (!txr)
3316 continue;
3317
c0c050c5 3318 ring = &txr->tx_ring_struct;
6fe19886
MC
3319 rmem = &ring->ring_mem;
3320 rmem->nr_pages = bp->tx_nr_pages;
3321 rmem->page_size = HW_RXBD_RING_SIZE;
3322 rmem->pg_arr = (void **)txr->tx_desc_ring;
3323 rmem->dma_arr = txr->tx_desc_mapping;
3324 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3325 rmem->vmem = (void **)&txr->tx_buf_ring;
c0c050c5
MC
3326 }
3327}
3328
3329static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3330{
3331 int i;
3332 u32 prod;
3333 struct rx_bd **rx_buf_ring;
3334
6fe19886
MC
3335 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3336 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
c0c050c5
MC
3337 int j;
3338 struct rx_bd *rxbd;
3339
3340 rxbd = rx_buf_ring[i];
3341 if (!rxbd)
3342 continue;
3343
3344 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3345 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3346 rxbd->rx_bd_opaque = prod;
3347 }
3348 }
3349}
3350
7737d325 3351static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
c0c050c5 3352{
7737d325 3353 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
c0c050c5 3354 struct net_device *dev = bp->dev;
7737d325 3355 u32 prod;
c0c050c5
MC
3356 int i;
3357
c0c050c5
MC
3358 prod = rxr->rx_prod;
3359 for (i = 0; i < bp->rx_ring_size; i++) {
7737d325 3360 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
c0c050c5
MC
3361 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3362 ring_nr, i, bp->rx_ring_size);
3363 break;
3364 }
3365 prod = NEXT_RX(prod);
3366 }
3367 rxr->rx_prod = prod;
edd0c2cc 3368
c0c050c5
MC
3369 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3370 return 0;
3371
c0c050c5
MC
3372 prod = rxr->rx_agg_prod;
3373 for (i = 0; i < bp->rx_agg_ring_size; i++) {
7737d325 3374 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
c0c050c5
MC
3375 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3376 ring_nr, i, bp->rx_ring_size);
3377 break;
3378 }
3379 prod = NEXT_RX_AGG(prod);
3380 }
3381 rxr->rx_agg_prod = prod;
c0c050c5 3382
7737d325
MC
3383 if (rxr->rx_tpa) {
3384 dma_addr_t mapping;
3385 u8 *data;
c0c050c5 3386
7737d325
MC
3387 for (i = 0; i < bp->max_tpa; i++) {
3388 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3389 if (!data)
3390 return -ENOMEM;
c0c050c5 3391
7737d325
MC
3392 rxr->rx_tpa[i].data = data;
3393 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3394 rxr->rx_tpa[i].mapping = mapping;
c0c050c5
MC
3395 }
3396 }
c0c050c5
MC
3397 return 0;
3398}
3399
7737d325
MC
3400static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3401{
3402 struct bnxt_rx_ring_info *rxr;
3403 struct bnxt_ring_struct *ring;
3404 u32 type;
3405
3406 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3407 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3408
3409 if (NET_IP_ALIGN == 2)
3410 type |= RX_BD_FLAGS_SOP;
3411
3412 rxr = &bp->rx_ring[ring_nr];
3413 ring = &rxr->rx_ring_struct;
3414 bnxt_init_rxbd_pages(ring, type);
3415
3416 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3417 bpf_prog_add(bp->xdp_prog, 1);
3418 rxr->xdp_prog = bp->xdp_prog;
3419 }
3420 ring->fw_ring_id = INVALID_HW_RING_ID;
3421
3422 ring = &rxr->rx_agg_ring_struct;
3423 ring->fw_ring_id = INVALID_HW_RING_ID;
3424
3425 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3426 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3427 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3428
3429 bnxt_init_rxbd_pages(ring, type);
3430 }
3431
3432 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3433}
3434
2247925f
SP
3435static void bnxt_init_cp_rings(struct bnxt *bp)
3436{
3e08b184 3437 int i, j;
2247925f
SP
3438
3439 for (i = 0; i < bp->cp_nr_rings; i++) {
3440 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3441 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3442
3443 ring->fw_ring_id = INVALID_HW_RING_ID;
6a8788f2
AG
3444 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3445 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3e08b184
MC
3446 for (j = 0; j < 2; j++) {
3447 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3448
3449 if (!cpr2)
3450 continue;
3451
3452 ring = &cpr2->cp_ring_struct;
3453 ring->fw_ring_id = INVALID_HW_RING_ID;
3454 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3455 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3456 }
2247925f
SP
3457 }
3458}
3459
c0c050c5
MC
3460static int bnxt_init_rx_rings(struct bnxt *bp)
3461{
3462 int i, rc = 0;
3463
c61fb99c 3464 if (BNXT_RX_PAGE_MODE(bp)) {
c6d30e83
MC
3465 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3466 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
c61fb99c
MC
3467 } else {
3468 bp->rx_offset = BNXT_RX_OFFSET;
3469 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3470 }
b3dba77c 3471
c0c050c5
MC
3472 for (i = 0; i < bp->rx_nr_rings; i++) {
3473 rc = bnxt_init_one_rx_ring(bp, i);
3474 if (rc)
3475 break;
3476 }
3477
3478 return rc;
3479}
3480
3481static int bnxt_init_tx_rings(struct bnxt *bp)
3482{
3483 u16 i;
3484
3485 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3486 MAX_SKB_FRAGS + 1);
3487
3488 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3489 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3490 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3491
3492 ring->fw_ring_id = INVALID_HW_RING_ID;
3493 }
3494
3495 return 0;
3496}
3497
3498static void bnxt_free_ring_grps(struct bnxt *bp)
3499{
3500 kfree(bp->grp_info);
3501 bp->grp_info = NULL;
3502}
3503
3504static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3505{
3506 int i;
3507
3508 if (irq_re_init) {
3509 bp->grp_info = kcalloc(bp->cp_nr_rings,
3510 sizeof(struct bnxt_ring_grp_info),
3511 GFP_KERNEL);
3512 if (!bp->grp_info)
3513 return -ENOMEM;
3514 }
3515 for (i = 0; i < bp->cp_nr_rings; i++) {
3516 if (irq_re_init)
3517 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3518 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3519 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3520 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3521 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3522 }
3523 return 0;
3524}
3525
3526static void bnxt_free_vnics(struct bnxt *bp)
3527{
3528 kfree(bp->vnic_info);
3529 bp->vnic_info = NULL;
3530 bp->nr_vnics = 0;
3531}
3532
3533static int bnxt_alloc_vnics(struct bnxt *bp)
3534{
3535 int num_vnics = 1;
3536
3537#ifdef CONFIG_RFS_ACCEL
9b3d15e6 3538 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
c0c050c5
MC
3539 num_vnics += bp->rx_nr_rings;
3540#endif
3541
dc52c6c7
PS
3542 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3543 num_vnics++;
3544
c0c050c5
MC
3545 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3546 GFP_KERNEL);
3547 if (!bp->vnic_info)
3548 return -ENOMEM;
3549
3550 bp->nr_vnics = num_vnics;
3551 return 0;
3552}
3553
3554static void bnxt_init_vnics(struct bnxt *bp)
3555{
3556 int i;
3557
3558 for (i = 0; i < bp->nr_vnics; i++) {
3559 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
44c6f72a 3560 int j;
c0c050c5
MC
3561
3562 vnic->fw_vnic_id = INVALID_HW_RING_ID;
44c6f72a
MC
3563 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3564 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3565
c0c050c5
MC
3566 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3567
3568 if (bp->vnic_info[i].rss_hash_key) {
3569 if (i == 0)
3570 prandom_bytes(vnic->rss_hash_key,
3571 HW_HASH_KEY_SIZE);
3572 else
3573 memcpy(vnic->rss_hash_key,
3574 bp->vnic_info[0].rss_hash_key,
3575 HW_HASH_KEY_SIZE);
3576 }
3577 }
3578}
3579
3580static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3581{
3582 int pages;
3583
3584 pages = ring_size / desc_per_pg;
3585
3586 if (!pages)
3587 return 1;
3588
3589 pages++;
3590
3591 while (pages & (pages - 1))
3592 pages++;
3593
3594 return pages;
3595}
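/* bnxt_calc_nr_ring_pages() above returns the smallest power of two
 * strictly greater than ring_size / desc_per_pg (minimum 1): a quotient
 * of 3 yields 4 pages, and a quotient of exactly 4 yields 8.  Keeping the
 * page count a power of two keeps the ring masks computed from it below
 * usable as bit masks.
 */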
3596
c6d30e83 3597void bnxt_set_tpa_flags(struct bnxt *bp)
c0c050c5
MC
3598{
3599 bp->flags &= ~BNXT_FLAG_TPA;
341138c3
MC
3600 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3601 return;
c0c050c5
MC
3602 if (bp->dev->features & NETIF_F_LRO)
3603 bp->flags |= BNXT_FLAG_LRO;
1054aee8 3604 else if (bp->dev->features & NETIF_F_GRO_HW)
c0c050c5
MC
3605 bp->flags |= BNXT_FLAG_GRO;
3606}
3607
3608/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3609 * be set on entry.
3610 */
3611void bnxt_set_ring_params(struct bnxt *bp)
3612{
27640ce6 3613 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
c0c050c5
MC
3614 u32 agg_factor = 0, agg_ring_size = 0;
3615
3616 /* 8 for CRC and VLAN */
3617 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3618
3619 rx_space = rx_size + NET_SKB_PAD +
3620 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
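/* Worked example with assumed values (1500 byte MTU, ETH_HLEN of 14,
 * NET_IP_ALIGN of 2, 64 byte cache lines, NET_SKB_PAD of 64 and a
 * 320 byte skb_shared_info): rx_size = SKB_DATA_ALIGN(1500 + 14 + 2 + 8)
 * = 1536, and rx_space = 1536 + 64 + 320 = 1920 bytes per buffer.
 */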
3621
3622 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3623 ring_size = bp->rx_ring_size;
3624 bp->rx_agg_ring_size = 0;
3625 bp->rx_agg_nr_pages = 0;
3626
3627 if (bp->flags & BNXT_FLAG_TPA)
2839f28b 3628 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
c0c050c5
MC
3629
3630 bp->flags &= ~BNXT_FLAG_JUMBO;
bdbd1eb5 3631 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
c0c050c5
MC
3632 u32 jumbo_factor;
3633
3634 bp->flags |= BNXT_FLAG_JUMBO;
3635 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3636 if (jumbo_factor > agg_factor)
3637 agg_factor = jumbo_factor;
3638 }
3639 agg_ring_size = ring_size * agg_factor;
3640
3641 if (agg_ring_size) {
3642 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3643 RX_DESC_CNT);
3644 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3645 u32 tmp = agg_ring_size;
3646
3647 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3648 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3649 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3650 tmp, agg_ring_size);
3651 }
3652 bp->rx_agg_ring_size = agg_ring_size;
3653 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3654 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3655 rx_space = rx_size + NET_SKB_PAD +
3656 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3657 }
3658
3659 bp->rx_buf_use_size = rx_size;
3660 bp->rx_buf_size = rx_space;
3661
3662 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3663 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3664
3665 ring_size = bp->tx_ring_size;
3666 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3667 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3668
27640ce6
MC
3669 max_rx_cmpl = bp->rx_ring_size;
3670 /* MAX TPA needs to be added because TPA_START completions are
3671 * immediately recycled, so the TPA completions are not bound by
3672 * the RX ring size.
3673 */
3674 if (bp->flags & BNXT_FLAG_TPA)
3675 max_rx_cmpl += bp->max_tpa;
3676 /* RX and TPA completions are 32-byte, all others are 16-byte */
3677 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
c0c050c5
MC
3678 bp->cp_ring_size = ring_size;
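/* Worked example with assumed values: a 2048 entry RX ring with TPA
 * enabled and max_tpa = 256, agg_ring_size = 2048 and a 512 entry TX
 * ring needs (2048 + 256) * 2 + 2048 + 512 = 7168 16-byte completion
 * entries; bnxt_calc_nr_ring_pages() below then rounds this up to
 * whole pages.
 */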
3679
3680 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3681 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3682 bp->cp_nr_pages = MAX_CP_PAGES;
3683 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3684 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3685 ring_size, bp->cp_ring_size);
3686 }
3687 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3688 bp->cp_ring_mask = bp->cp_bit - 1;
3689}
3690
96a8604f
JDB
3691/* Changing allocation mode of RX rings.
3692 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3693 */
c61fb99c 3694int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
6bb19474 3695{
c61fb99c
MC
3696 if (page_mode) {
3697 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3698 return -EOPNOTSUPP;
7eb9bb3a
MC
3699 bp->dev->max_mtu =
3700 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
c61fb99c
MC
3701 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3702 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
c61fb99c
MC
3703 bp->rx_dir = DMA_BIDIRECTIONAL;
3704 bp->rx_skb_func = bnxt_rx_page_skb;
1054aee8
MC
3705 /* Disable LRO or GRO_HW */
3706 netdev_update_features(bp->dev);
c61fb99c 3707 } else {
7eb9bb3a 3708 bp->dev->max_mtu = bp->max_mtu;
c61fb99c
MC
3709 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3710 bp->rx_dir = DMA_FROM_DEVICE;
3711 bp->rx_skb_func = bnxt_rx_skb;
3712 }
6bb19474
MC
3713 return 0;
3714}
3715
c0c050c5
MC
3716static void bnxt_free_vnic_attributes(struct bnxt *bp)
3717{
3718 int i;
3719 struct bnxt_vnic_info *vnic;
3720 struct pci_dev *pdev = bp->pdev;
3721
3722 if (!bp->vnic_info)
3723 return;
3724
3725 for (i = 0; i < bp->nr_vnics; i++) {
3726 vnic = &bp->vnic_info[i];
3727
3728 kfree(vnic->fw_grp_ids);
3729 vnic->fw_grp_ids = NULL;
3730
3731 kfree(vnic->uc_list);
3732 vnic->uc_list = NULL;
3733
3734 if (vnic->mc_list) {
3735 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3736 vnic->mc_list, vnic->mc_list_mapping);
3737 vnic->mc_list = NULL;
3738 }
3739
3740 if (vnic->rss_table) {
34370d24 3741 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
c0c050c5
MC
3742 vnic->rss_table,
3743 vnic->rss_table_dma_addr);
3744 vnic->rss_table = NULL;
3745 }
3746
3747 vnic->rss_hash_key = NULL;
3748 vnic->flags = 0;
3749 }
3750}
3751
3752static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3753{
3754 int i, rc = 0, size;
3755 struct bnxt_vnic_info *vnic;
3756 struct pci_dev *pdev = bp->pdev;
3757 int max_rings;
3758
3759 for (i = 0; i < bp->nr_vnics; i++) {
3760 vnic = &bp->vnic_info[i];
3761
3762 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3763 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3764
3765 if (mem_size > 0) {
3766 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3767 if (!vnic->uc_list) {
3768 rc = -ENOMEM;
3769 goto out;
3770 }
3771 }
3772 }
3773
3774 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3775 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3776 vnic->mc_list =
3777 dma_alloc_coherent(&pdev->dev,
3778 vnic->mc_list_size,
3779 &vnic->mc_list_mapping,
3780 GFP_KERNEL);
3781 if (!vnic->mc_list) {
3782 rc = -ENOMEM;
3783 goto out;
3784 }
3785 }
3786
44c6f72a
MC
3787 if (bp->flags & BNXT_FLAG_CHIP_P5)
3788 goto vnic_skip_grps;
3789
c0c050c5
MC
3790 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3791 max_rings = bp->rx_nr_rings;
3792 else
3793 max_rings = 1;
3794
3795 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3796 if (!vnic->fw_grp_ids) {
3797 rc = -ENOMEM;
3798 goto out;
3799 }
44c6f72a 3800vnic_skip_grps:
ae10ae74
MC
3801 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3802 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3803 continue;
3804
c0c050c5 3805 /* Allocate rss table and hash key */
34370d24
MC
3806 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3807 if (bp->flags & BNXT_FLAG_CHIP_P5)
3808 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3809
3810 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3811 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3812 vnic->rss_table_size,
c0c050c5
MC
3813 &vnic->rss_table_dma_addr,
3814 GFP_KERNEL);
3815 if (!vnic->rss_table) {
3816 rc = -ENOMEM;
3817 goto out;
3818 }
3819
c0c050c5
MC
3820 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3821 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3822 }
3823 return 0;
3824
3825out:
3826 return rc;
3827}
3828
3829static void bnxt_free_hwrm_resources(struct bnxt *bp)
3830{
3831 struct pci_dev *pdev = bp->pdev;
3832
a2bf74f4
VD
3833 if (bp->hwrm_cmd_resp_addr) {
3834 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3835 bp->hwrm_cmd_resp_dma_addr);
3836 bp->hwrm_cmd_resp_addr = NULL;
3837 }
760b6d33
VD
3838
3839 if (bp->hwrm_cmd_kong_resp_addr) {
3840 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3841 bp->hwrm_cmd_kong_resp_addr,
3842 bp->hwrm_cmd_kong_resp_dma_addr);
3843 bp->hwrm_cmd_kong_resp_addr = NULL;
3844 }
3845}
3846
3847static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3848{
3849 struct pci_dev *pdev = bp->pdev;
3850
ba642ab7
MC
3851 if (bp->hwrm_cmd_kong_resp_addr)
3852 return 0;
3853
760b6d33
VD
3854 bp->hwrm_cmd_kong_resp_addr =
3855 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3856 &bp->hwrm_cmd_kong_resp_dma_addr,
3857 GFP_KERNEL);
3858 if (!bp->hwrm_cmd_kong_resp_addr)
3859 return -ENOMEM;
3860
3861 return 0;
c0c050c5
MC
3862}
3863
3864static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3865{
3866 struct pci_dev *pdev = bp->pdev;
3867
3868 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3869 &bp->hwrm_cmd_resp_dma_addr,
3870 GFP_KERNEL);
3871 if (!bp->hwrm_cmd_resp_addr)
3872 return -ENOMEM;
c0c050c5
MC
3873
3874 return 0;
3875}
3876
e605db80
DK
3877static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3878{
3879 if (bp->hwrm_short_cmd_req_addr) {
3880 struct pci_dev *pdev = bp->pdev;
3881
1dfddc41 3882 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
e605db80
DK
3883 bp->hwrm_short_cmd_req_addr,
3884 bp->hwrm_short_cmd_req_dma_addr);
3885 bp->hwrm_short_cmd_req_addr = NULL;
3886 }
3887}
3888
3889static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3890{
3891 struct pci_dev *pdev = bp->pdev;
3892
ba642ab7
MC
3893 if (bp->hwrm_short_cmd_req_addr)
3894 return 0;
3895
e605db80 3896 bp->hwrm_short_cmd_req_addr =
1dfddc41 3897 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
e605db80
DK
3898 &bp->hwrm_short_cmd_req_dma_addr,
3899 GFP_KERNEL);
3900 if (!bp->hwrm_short_cmd_req_addr)
3901 return -ENOMEM;
3902
3903 return 0;
3904}
3905
177a6cde 3906static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
c0c050c5 3907{
a37120b2
MC
3908 kfree(stats->hw_masks);
3909 stats->hw_masks = NULL;
3910 kfree(stats->sw_stats);
3911 stats->sw_stats = NULL;
177a6cde
MC
3912 if (stats->hw_stats) {
3913 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3914 stats->hw_stats_map);
3915 stats->hw_stats = NULL;
3916 }
3917}
c0c050c5 3918
a37120b2
MC
3919static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3920 bool alloc_masks)
177a6cde
MC
3921{
3922 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3923 &stats->hw_stats_map, GFP_KERNEL);
3924 if (!stats->hw_stats)
3925 return -ENOMEM;
00db3cba 3926
a37120b2
MC
3927 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
3928 if (!stats->sw_stats)
3929 goto stats_mem_err;
3930
3931 if (alloc_masks) {
3932 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
3933 if (!stats->hw_masks)
3934 goto stats_mem_err;
3935 }
177a6cde 3936 return 0;
a37120b2
MC
3937
3938stats_mem_err:
3939 bnxt_free_stats_mem(bp, stats);
3940 return -ENOMEM;
177a6cde 3941}
00db3cba 3942
d752d053
MC
3943static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
3944{
3945 int i;
3946
3947 for (i = 0; i < count; i++)
3948 mask_arr[i] = mask;
3949}
3950
3951static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
3952{
3953 int i;
3954
3955 for (i = 0; i < count; i++)
3956 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
3957}
3958
3959static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3960 struct bnxt_stats_mem *stats)
3961{
3962 struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3963 struct hwrm_func_qstats_ext_input req = {0};
3964 __le64 *hw_masks;
3965 int rc;
3966
3967 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3968 !(bp->flags & BNXT_FLAG_CHIP_P5))
3969 return -EOPNOTSUPP;
3970
3971 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
d2b42d01 3972 req.fid = cpu_to_le16(0xffff);
d752d053
MC
3973 req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3974 mutex_lock(&bp->hwrm_cmd_lock);
3975 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3976 if (rc)
3977 goto qstat_exit;
3978
3979 hw_masks = &resp->rx_ucast_pkts;
3980 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
3981
3982qstat_exit:
3983 mutex_unlock(&bp->hwrm_cmd_lock);
3984 return rc;
3985}
3986
531d1d26
MC
3987static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
3988static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
3989
d752d053
MC
3990static void bnxt_init_stats(struct bnxt *bp)
3991{
3992 struct bnxt_napi *bnapi = bp->bnapi[0];
3993 struct bnxt_cp_ring_info *cpr;
3994 struct bnxt_stats_mem *stats;
531d1d26
MC
3995 __le64 *rx_stats, *tx_stats;
3996 int rc, rx_count, tx_count;
3997 u64 *rx_masks, *tx_masks;
d752d053 3998 u64 mask;
531d1d26 3999 u8 flags;
d752d053
MC
4000
4001 cpr = &bnapi->cp_ring;
4002 stats = &cpr->stats;
4003 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
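/* If the firmware cannot report per-counter masks, fall back to
 * assuming 48-bit wide counters on P5 chips and full 64-bit counters
 * everywhere else.
 */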
4004 if (rc) {
4005 if (bp->flags & BNXT_FLAG_CHIP_P5)
4006 mask = (1ULL << 48) - 1;
4007 else
4008 mask = -1ULL;
4009 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4010 }
531d1d26
MC
4011 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4012 stats = &bp->port_stats;
4013 rx_stats = stats->hw_stats;
4014 rx_masks = stats->hw_masks;
4015 rx_count = sizeof(struct rx_port_stats) / 8;
4016 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4017 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4018 tx_count = sizeof(struct tx_port_stats) / 8;
4019
4020 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4021 rc = bnxt_hwrm_port_qstats(bp, flags);
4022 if (rc) {
4023 mask = (1ULL << 40) - 1;
4024
4025 bnxt_fill_masks(rx_masks, mask, rx_count);
4026 bnxt_fill_masks(tx_masks, mask, tx_count);
4027 } else {
4028 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4029 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4030 bnxt_hwrm_port_qstats(bp, 0);
4031 }
4032 }
4033 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4034 stats = &bp->rx_port_stats_ext;
4035 rx_stats = stats->hw_stats;
4036 rx_masks = stats->hw_masks;
4037 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4038 stats = &bp->tx_port_stats_ext;
4039 tx_stats = stats->hw_stats;
4040 tx_masks = stats->hw_masks;
4041 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4042
c07fa08f 4043 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
531d1d26
MC
4044 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4045 if (rc) {
4046 mask = (1ULL << 40) - 1;
4047
4048 bnxt_fill_masks(rx_masks, mask, rx_count);
4049 if (tx_stats)
4050 bnxt_fill_masks(tx_masks, mask, tx_count);
4051 } else {
4052 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4053 if (tx_stats)
4054 bnxt_copy_hw_masks(tx_masks, tx_stats,
4055 tx_count);
4056 bnxt_hwrm_port_qstats_ext(bp, 0);
4057 }
4058 }
d752d053
MC
4059}
4060
177a6cde
MC
4061static void bnxt_free_port_stats(struct bnxt *bp)
4062{
4063 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4064 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
36e53349 4065
177a6cde
MC
4066 bnxt_free_stats_mem(bp, &bp->port_stats);
4067 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4068 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
fd3ab1c7
MC
4069}
4070
4071static void bnxt_free_ring_stats(struct bnxt *bp)
4072{
177a6cde 4073 int i;
3bdf56c4 4074
c0c050c5
MC
4075 if (!bp->bnapi)
4076 return;
4077
c0c050c5
MC
4078 for (i = 0; i < bp->cp_nr_rings; i++) {
4079 struct bnxt_napi *bnapi = bp->bnapi[i];
4080 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4081
177a6cde 4082 bnxt_free_stats_mem(bp, &cpr->stats);
c0c050c5
MC
4083 }
4084}
4085
4086static int bnxt_alloc_stats(struct bnxt *bp)
4087{
4088 u32 size, i;
177a6cde 4089 int rc;
c0c050c5 4090
4e748506 4091 size = bp->hw_ring_stats_size;
c0c050c5
MC
4092
4093 for (i = 0; i < bp->cp_nr_rings; i++) {
4094 struct bnxt_napi *bnapi = bp->bnapi[i];
4095 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4096
177a6cde 4097 cpr->stats.len = size;
a37120b2 4098 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
177a6cde
MC
4099 if (rc)
4100 return rc;
c0c050c5
MC
4101
4102 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4103 }
3bdf56c4 4104
a220eabc
VV
4105 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4106 return 0;
fd3ab1c7 4107
177a6cde 4108 if (bp->port_stats.hw_stats)
a220eabc 4109 goto alloc_ext_stats;
3bdf56c4 4110
177a6cde 4111 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
a37120b2 4112 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
177a6cde
MC
4113 if (rc)
4114 return rc;
3bdf56c4 4115
a220eabc 4116 bp->flags |= BNXT_FLAG_PORT_STATS;
00db3cba 4117
fd3ab1c7 4118alloc_ext_stats:
a220eabc
VV
4119 /* Display extended statistics only if FW supports it */
4120 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
6154532f 4121 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
00db3cba
VV
4122 return 0;
4123
177a6cde 4124 if (bp->rx_port_stats_ext.hw_stats)
a220eabc 4125 goto alloc_tx_ext_stats;
fd3ab1c7 4126
177a6cde 4127 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
a37120b2 4128 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
177a6cde
MC
4129 /* Extended stats are optional */
4130 if (rc)
a220eabc 4131 return 0;
00db3cba 4132
fd3ab1c7 4133alloc_tx_ext_stats:
177a6cde 4134 if (bp->tx_port_stats_ext.hw_stats)
dfe64de9 4135 return 0;
fd3ab1c7 4136
6154532f
VV
4137 if (bp->hwrm_spec_code >= 0x10902 ||
4138 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
177a6cde 4139 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
a37120b2 4140 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
177a6cde
MC
4141 /* Extended stats are optional */
4142 if (rc)
4143 return 0;
3bdf56c4 4144 }
a220eabc 4145 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
c0c050c5
MC
4146 return 0;
4147}
4148
4149static void bnxt_clear_ring_indices(struct bnxt *bp)
4150{
4151 int i;
4152
4153 if (!bp->bnapi)
4154 return;
4155
4156 for (i = 0; i < bp->cp_nr_rings; i++) {
4157 struct bnxt_napi *bnapi = bp->bnapi[i];
4158 struct bnxt_cp_ring_info *cpr;
4159 struct bnxt_rx_ring_info *rxr;
4160 struct bnxt_tx_ring_info *txr;
4161
4162 if (!bnapi)
4163 continue;
4164
4165 cpr = &bnapi->cp_ring;
4166 cpr->cp_raw_cons = 0;
4167
b6ab4b01 4168 txr = bnapi->tx_ring;
3b2b7d9d
MC
4169 if (txr) {
4170 txr->tx_prod = 0;
4171 txr->tx_cons = 0;
4172 }
c0c050c5 4173
b6ab4b01 4174 rxr = bnapi->rx_ring;
3b2b7d9d
MC
4175 if (rxr) {
4176 rxr->rx_prod = 0;
4177 rxr->rx_agg_prod = 0;
4178 rxr->rx_sw_agg_prod = 0;
376a5b86 4179 rxr->rx_next_cons = 0;
3b2b7d9d 4180 }
c0c050c5
MC
4181 }
4182}
4183
4184static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4185{
4186#ifdef CONFIG_RFS_ACCEL
4187 int i;
4188
4189 /* We are under rtnl_lock and all our NAPIs have been disabled,
4190 * so it is safe to delete the hash table.
4191 */
4192 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4193 struct hlist_head *head;
4194 struct hlist_node *tmp;
4195 struct bnxt_ntuple_filter *fltr;
4196
4197 head = &bp->ntp_fltr_hash_tbl[i];
4198 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4199 hlist_del(&fltr->hash);
4200 kfree(fltr);
4201 }
4202 }
4203 if (irq_reinit) {
4204 kfree(bp->ntp_fltr_bmap);
4205 bp->ntp_fltr_bmap = NULL;
4206 }
4207 bp->ntp_fltr_count = 0;
4208#endif
4209}
4210
4211static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4212{
4213#ifdef CONFIG_RFS_ACCEL
4214 int i, rc = 0;
4215
4216 if (!(bp->flags & BNXT_FLAG_RFS))
4217 return 0;
4218
4219 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4220 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4221
4222 bp->ntp_fltr_count = 0;
ac45bd93
DC
4223 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4224 sizeof(long),
c0c050c5
MC
4225 GFP_KERNEL);
4226
4227 if (!bp->ntp_fltr_bmap)
4228 rc = -ENOMEM;
4229
4230 return rc;
4231#else
4232 return 0;
4233#endif
4234}
4235
4236static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4237{
4238 bnxt_free_vnic_attributes(bp);
4239 bnxt_free_tx_rings(bp);
4240 bnxt_free_rx_rings(bp);
4241 bnxt_free_cp_rings(bp);
4242 bnxt_free_ntp_fltrs(bp, irq_re_init);
4243 if (irq_re_init) {
fd3ab1c7 4244 bnxt_free_ring_stats(bp);
b0d28207 4245 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
eba93de6 4246 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
fea6b333 4247 bnxt_free_port_stats(bp);
c0c050c5
MC
4248 bnxt_free_ring_grps(bp);
4249 bnxt_free_vnics(bp);
a960dec9
MC
4250 kfree(bp->tx_ring_map);
4251 bp->tx_ring_map = NULL;
b6ab4b01
MC
4252 kfree(bp->tx_ring);
4253 bp->tx_ring = NULL;
4254 kfree(bp->rx_ring);
4255 bp->rx_ring = NULL;
c0c050c5
MC
4256 kfree(bp->bnapi);
4257 bp->bnapi = NULL;
4258 } else {
4259 bnxt_clear_ring_indices(bp);
4260 }
4261}
4262
4263static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4264{
01657bcd 4265 int i, j, rc, size, arr_size;
c0c050c5
MC
4266 void *bnapi;
4267
4268 if (irq_re_init) {
4269 /* Allocate bnapi mem pointer array and mem block for
4270 * all queues
4271 */
4272 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4273 bp->cp_nr_rings);
4274 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4275 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4276 if (!bnapi)
4277 return -ENOMEM;
4278
4279 bp->bnapi = bnapi;
4280 bnapi += arr_size;
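/* The pointer array and the per-ring bnxt_napi structs live in the same
 * allocation: bnapi now points just past the pointer array, at the first
 * of cp_nr_rings L1-cache-aligned bnxt_napi blocks assigned below.
 */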
4281 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4282 bp->bnapi[i] = bnapi;
4283 bp->bnapi[i]->index = i;
4284 bp->bnapi[i]->bp = bp;
e38287b7
MC
4285 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4286 struct bnxt_cp_ring_info *cpr =
4287 &bp->bnapi[i]->cp_ring;
4288
4289 cpr->cp_ring_struct.ring_mem.flags =
4290 BNXT_RMEM_RING_PTE_FLAG;
4291 }
c0c050c5
MC
4292 }
4293
b6ab4b01
MC
4294 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4295 sizeof(struct bnxt_rx_ring_info),
4296 GFP_KERNEL);
4297 if (!bp->rx_ring)
4298 return -ENOMEM;
4299
4300 for (i = 0; i < bp->rx_nr_rings; i++) {
e38287b7
MC
4301 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4302
4303 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4304 rxr->rx_ring_struct.ring_mem.flags =
4305 BNXT_RMEM_RING_PTE_FLAG;
4306 rxr->rx_agg_ring_struct.ring_mem.flags =
4307 BNXT_RMEM_RING_PTE_FLAG;
4308 }
4309 rxr->bnapi = bp->bnapi[i];
b6ab4b01
MC
4310 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4311 }
4312
4313 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4314 sizeof(struct bnxt_tx_ring_info),
4315 GFP_KERNEL);
4316 if (!bp->tx_ring)
4317 return -ENOMEM;
4318
a960dec9
MC
4319 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4320 GFP_KERNEL);
4321
4322 if (!bp->tx_ring_map)
4323 return -ENOMEM;
4324
01657bcd
MC
4325 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4326 j = 0;
4327 else
4328 j = bp->rx_nr_rings;
4329
4330 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
e38287b7
MC
4331 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4332
4333 if (bp->flags & BNXT_FLAG_CHIP_P5)
4334 txr->tx_ring_struct.ring_mem.flags =
4335 BNXT_RMEM_RING_PTE_FLAG;
4336 txr->bnapi = bp->bnapi[j];
4337 bp->bnapi[j]->tx_ring = txr;
5f449249 4338 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
38413406 4339 if (i >= bp->tx_nr_rings_xdp) {
e38287b7 4340 txr->txq_index = i - bp->tx_nr_rings_xdp;
38413406
MC
4341 bp->bnapi[j]->tx_int = bnxt_tx_int;
4342 } else {
fa3e93e8 4343 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
38413406
MC
4344 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4345 }
b6ab4b01
MC
4346 }
4347
c0c050c5
MC
4348 rc = bnxt_alloc_stats(bp);
4349 if (rc)
4350 goto alloc_mem_err;
d752d053 4351 bnxt_init_stats(bp);
c0c050c5
MC
4352
4353 rc = bnxt_alloc_ntp_fltrs(bp);
4354 if (rc)
4355 goto alloc_mem_err;
4356
4357 rc = bnxt_alloc_vnics(bp);
4358 if (rc)
4359 goto alloc_mem_err;
4360 }
4361
4362 bnxt_init_ring_struct(bp);
4363
4364 rc = bnxt_alloc_rx_rings(bp);
4365 if (rc)
4366 goto alloc_mem_err;
4367
4368 rc = bnxt_alloc_tx_rings(bp);
4369 if (rc)
4370 goto alloc_mem_err;
4371
4372 rc = bnxt_alloc_cp_rings(bp);
4373 if (rc)
4374 goto alloc_mem_err;
4375
4376 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4377 BNXT_VNIC_UCAST_FLAG;
4378 rc = bnxt_alloc_vnic_attributes(bp);
4379 if (rc)
4380 goto alloc_mem_err;
4381 return 0;
4382
4383alloc_mem_err:
4384 bnxt_free_mem(bp, true);
4385 return rc;
4386}
4387
9d8bc097
MC
4388static void bnxt_disable_int(struct bnxt *bp)
4389{
4390 int i;
4391
4392 if (!bp->bnapi)
4393 return;
4394
4395 for (i = 0; i < bp->cp_nr_rings; i++) {
4396 struct bnxt_napi *bnapi = bp->bnapi[i];
4397 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
daf1f1e7 4398 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9d8bc097 4399
daf1f1e7 4400 if (ring->fw_ring_id != INVALID_HW_RING_ID)
697197e5 4401 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4402 }
4403}
4404
e5811b8c
MC
4405static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4406{
4407 struct bnxt_napi *bnapi = bp->bnapi[n];
4408 struct bnxt_cp_ring_info *cpr;
4409
4410 cpr = &bnapi->cp_ring;
4411 return cpr->cp_ring_struct.map_idx;
4412}
4413
9d8bc097
MC
4414static void bnxt_disable_int_sync(struct bnxt *bp)
4415{
4416 int i;
4417
38290e37
MC
4418 if (!bp->irq_tbl)
4419 return;
4420
9d8bc097
MC
4421 atomic_inc(&bp->intr_sem);
4422
4423 bnxt_disable_int(bp);
e5811b8c
MC
4424 for (i = 0; i < bp->cp_nr_rings; i++) {
4425 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4426
4427 synchronize_irq(bp->irq_tbl[map_idx].vector);
4428 }
9d8bc097
MC
4429}
4430
4431static void bnxt_enable_int(struct bnxt *bp)
4432{
4433 int i;
4434
4435 atomic_set(&bp->intr_sem, 0);
4436 for (i = 0; i < bp->cp_nr_rings; i++) {
4437 struct bnxt_napi *bnapi = bp->bnapi[i];
4438 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4439
697197e5 4440 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4441 }
4442}
4443
c0c050c5
MC
4444void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4445 u16 cmpl_ring, u16 target_id)
4446{
a8643e16 4447 struct input *req = request;
c0c050c5 4448
a8643e16
MC
4449 req->req_type = cpu_to_le16(req_type);
4450 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4451 req->target_id = cpu_to_le16(target_id);
760b6d33
VD
4452 if (bnxt_kong_hwrm_message(bp, req))
4453 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4454 else
4455 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
c0c050c5
MC
4456}
4457
d4f1420d
MC
4458static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4459{
4460 switch (hwrm_err) {
4461 case HWRM_ERR_CODE_SUCCESS:
4462 return 0;
cf223bfa
VV
4463 case HWRM_ERR_CODE_RESOURCE_LOCKED:
4464 return -EROFS;
d4f1420d
MC
4465 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4466 return -EACCES;
4467 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4468 return -ENOSPC;
4469 case HWRM_ERR_CODE_INVALID_PARAMS:
4470 case HWRM_ERR_CODE_INVALID_FLAGS:
4471 case HWRM_ERR_CODE_INVALID_ENABLES:
4472 case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4473 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4474 return -EINVAL;
4475 case HWRM_ERR_CODE_NO_BUFFER:
4476 return -ENOMEM;
4477 case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
3a707bed 4478 case HWRM_ERR_CODE_BUSY:
d4f1420d
MC
4479 return -EAGAIN;
4480 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4481 return -EOPNOTSUPP;
4482 default:
4483 return -EIO;
4484 }
4485}
4486
fbfbc485
MC
4487static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4488 int timeout, bool silent)
c0c050c5 4489{
a11fa2be 4490 int i, intr_process, rc, tmo_count;
a8643e16 4491 struct input *req = msg;
c0c050c5 4492 u32 *data = msg;
845adfe4 4493 u8 *valid;
c0c050c5
MC
4494 u16 cp_ring_id, len = 0;
4495 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
e605db80 4496 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
ebd5818c 4497 struct hwrm_short_input short_input = {0};
2e9ee398
VD
4498 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4499 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
760b6d33 4500 u16 dst = BNXT_HWRM_CHNL_CHIMP;
c0c050c5 4501
825741b0
VV
4502 if (BNXT_NO_FW_ACCESS(bp) &&
4503 le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
b4fff207
MC
4504 return -EBUSY;
4505
1dfddc41
MC
4506 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4507 if (msg_len > bp->hwrm_max_ext_req_len ||
4508 !bp->hwrm_short_cmd_req_addr)
4509 return -EINVAL;
4510 }
4511
760b6d33
VD
4512 if (bnxt_hwrm_kong_chnl(bp, req)) {
4513 dst = BNXT_HWRM_CHNL_KONG;
4514 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4515 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4516 resp = bp->hwrm_cmd_kong_resp_addr;
760b6d33
VD
4517 }
4518
4519 memset(resp, 0, PAGE_SIZE);
4520 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4521 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4522
4523 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4524 /* currently supports only one outstanding message */
4525 if (intr_process)
4526 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4527
1dfddc41
MC
4528 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4529 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
e605db80 4530 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
1dfddc41
MC
4531 u16 max_msg_len;
4532
4533 /* Set the boundary for the maximum extended request length for the
4534 * short cmd format. If it was passed up from the device, use the max
4535 * supported internal req length.
4536 */
4537 max_msg_len = bp->hwrm_max_ext_req_len;
e605db80
DK
4538
4539 memcpy(short_cmd_req, req, msg_len);
1dfddc41
MC
4540 if (msg_len < max_msg_len)
4541 memset(short_cmd_req + msg_len, 0,
4542 max_msg_len - msg_len);
e605db80
DK
4543
4544 short_input.req_type = req->req_type;
4545 short_input.signature =
4546 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4547 short_input.size = cpu_to_le16(msg_len);
4548 short_input.req_addr =
4549 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4550
4551 data = (u32 *)&short_input;
4552 msg_len = sizeof(short_input);
4553
4554 /* Sync memory write before updating doorbell */
4555 wmb();
4556
4557 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4558 }
4559
c0c050c5 4560 /* Write request msg to hwrm channel */
2e9ee398 4561 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
c0c050c5 4562
e605db80 4563 for (i = msg_len; i < max_req_len; i += 4)
2e9ee398 4564 writel(0, bp->bar0 + bar_offset + i);
d79979a1 4565
c0c050c5 4566 /* Ring channel doorbell */
2e9ee398 4567 writel(1, bp->bar0 + doorbell_offset);
c0c050c5 4568
5bedb529 4569 if (!pci_is_enabled(bp->pdev))
a2f3835c 4570 return -ENODEV;
5bedb529 4571
ff4fe81d
MC
4572 if (!timeout)
4573 timeout = DFLT_HWRM_CMD_TIMEOUT;
881d8353
VV
4574 /* Clamp the timeout to the supported maximum */
4575 timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT);
9751e8e7
AG
4576 /* convert timeout to usec */
4577 timeout *= 1000;
ff4fe81d 4578
c0c050c5 4579 i = 0;
9751e8e7
AG
4580 /* Short timeout for the first few iterations:
4581 * number of loops = number of loops for short timeout +
4582 * number of loops for standard timeout.
4583 */
4584 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4585 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4586 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
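/* tmo_count is the total number of polling iterations below: the first
 * HWRM_SHORT_TIMEOUT_COUNTER iterations sleep for the short interval,
 * and the remaining iterations cover what is left of the (microsecond)
 * timeout budget in HWRM_MIN_TIMEOUT sized steps.
 */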
89455017 4587
c0c050c5 4588 if (intr_process) {
fc718bb2
VD
4589 u16 seq_id = bp->hwrm_intr_seq_id;
4590
c0c050c5 4591 /* Wait until hwrm response cmpl interrupt is processed */
fc718bb2 4592 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
a11fa2be 4593 i++ < tmo_count) {
642aebde
PC
4594 /* Abort the wait for completion if the FW health
4595 * check has failed.
4596 */
4597 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4598 return -EBUSY;
9751e8e7 4599 /* on first few passes, just barely sleep */
80a9641f 4600 if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
9751e8e7
AG
4601 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4602 HWRM_SHORT_MAX_TIMEOUT);
80a9641f
PC
4603 } else {
4604 if (HWRM_WAIT_MUST_ABORT(bp, req))
4605 break;
9751e8e7
AG
4606 usleep_range(HWRM_MIN_TIMEOUT,
4607 HWRM_MAX_TIMEOUT);
80a9641f 4608 }
c0c050c5
MC
4609 }
4610
fc718bb2 4611 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
5bedb529
MC
4612 if (!silent)
4613 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4614 le16_to_cpu(req->req_type));
a935cb7e 4615 return -EBUSY;
c0c050c5 4616 }
2a5a8800
EP
4617 len = le16_to_cpu(resp->resp_len);
4618 valid = ((u8 *)resp) + len - 1;
c0c050c5 4619 } else {
cc559c1a
MC
4620 int j;
4621
c0c050c5 4622 /* Check if response len is updated */
a11fa2be 4623 for (i = 0; i < tmo_count; i++) {
642aebde
PC
4624 /* Abort the wait for completion if the FW health
4625 * check has failed.
4626 */
4627 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4628 return -EBUSY;
2a5a8800 4629 len = le16_to_cpu(resp->resp_len);
c0c050c5
MC
4630 if (len)
4631 break;
9751e8e7 4632 /* on first few passes, just barely sleep */
80a9641f 4633 if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
9751e8e7
AG
4634 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4635 HWRM_SHORT_MAX_TIMEOUT);
80a9641f
PC
4636 } else {
4637 if (HWRM_WAIT_MUST_ABORT(bp, req))
4638 goto timeout_abort;
9751e8e7
AG
4639 usleep_range(HWRM_MIN_TIMEOUT,
4640 HWRM_MAX_TIMEOUT);
80a9641f 4641 }
c0c050c5
MC
4642 }
4643
a11fa2be 4644 if (i >= tmo_count) {
80a9641f 4645timeout_abort:
5bedb529
MC
4646 if (!silent)
4647 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4648 HWRM_TOTAL_TIMEOUT(i),
4649 le16_to_cpu(req->req_type),
4650 le16_to_cpu(req->seq_id), len);
a935cb7e 4651 return -EBUSY;
c0c050c5
MC
4652 }
4653
845adfe4 4654 /* Last byte of resp contains valid bit */
2a5a8800 4655 valid = ((u8 *)resp) + len - 1;
cc559c1a 4656 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
845adfe4
MC
4657 /* make sure we read from updated DMA memory */
4658 dma_rmb();
4659 if (*valid)
c0c050c5 4660 break;
0000b81a 4661 usleep_range(1, 5);
c0c050c5
MC
4662 }
4663
cc559c1a 4664 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
5bedb529
MC
4665 if (!silent)
4666 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4667 HWRM_TOTAL_TIMEOUT(i),
4668 le16_to_cpu(req->req_type),
4669 le16_to_cpu(req->seq_id), len,
4670 *valid);
a935cb7e 4671 return -EBUSY;
c0c050c5
MC
4672 }
4673 }
4674
845adfe4
MC
4675 /* Zero the valid bit for compatibility. The valid bit in an older spec
4676 * may become a new field in a newer spec. We must make sure that a
4677 * new field not implemented by the old spec will read as zero.
4678 */
4679 *valid = 0;
c0c050c5 4680 rc = le16_to_cpu(resp->error_code);
fbfbc485 4681 if (rc && !silent)
c0c050c5
MC
4682 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4683 le16_to_cpu(resp->req_type),
4684 le16_to_cpu(resp->seq_id), rc);
d4f1420d 4685 return bnxt_hwrm_to_stderr(rc);
fbfbc485
MC
4686}
4687
4688int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4689{
4690 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
c0c050c5
MC
4691}
4692
cc72f3b1
MC
4693int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4694 int timeout)
4695{
4696 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4697}
4698
c0c050c5
MC
4699int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4700{
4701 int rc;
4702
4703 mutex_lock(&bp->hwrm_cmd_lock);
4704 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4705 mutex_unlock(&bp->hwrm_cmd_lock);
4706 return rc;
4707}
4708
90e20921
MC
4709int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4710 int timeout)
4711{
4712 int rc;
4713
4714 mutex_lock(&bp->hwrm_cmd_lock);
4715 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4716 mutex_unlock(&bp->hwrm_cmd_lock);
4717 return rc;
4718}
4719
2e882468
VV
4720int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4721 bool async_only)
c0c050c5 4722{
2e882468 4723 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
c0c050c5 4724 struct hwrm_func_drv_rgtr_input req = {0};
25be8623
MC
4725 DECLARE_BITMAP(async_events_bmap, 256);
4726 u32 *events = (u32 *)async_events_bmap;
acfb50e4 4727 u32 flags;
2e882468 4728 int rc, i;
a1653b13
MC
4729
4730 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4731
4732 req.enables =
4733 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
2e882468
VV
4734 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4735 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
a1653b13 4736
11f15ed3 4737 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
8280b38e
VV
4738 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4739 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4740 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
acfb50e4 4741 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
e633a329
VV
4742 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4743 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
acfb50e4 4744 req.flags = cpu_to_le32(flags);
d4f52de0
MC
4745 req.ver_maj_8b = DRV_VER_MAJ;
4746 req.ver_min_8b = DRV_VER_MIN;
4747 req.ver_upd_8b = DRV_VER_UPD;
4748 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4749 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4750 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
c0c050c5
MC
4751
4752 if (BNXT_PF(bp)) {
9b0436c3 4753 u32 data[8];
a1653b13 4754 int i;
c0c050c5 4755
9b0436c3
MC
4756 memset(data, 0, sizeof(data));
4757 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4758 u16 cmd = bnxt_vf_req_snif[i];
4759 unsigned int bit, idx;
4760
4761 idx = cmd / 32;
4762 bit = cmd % 32;
4763 data[idx] |= 1 << bit;
4764 }
c0c050c5 4765
de68f5de
MC
4766 for (i = 0; i < 8; i++)
4767 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4768
c0c050c5
MC
4769 req.enables |=
4770 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4771 }
4772
abd43a13
VD
4773 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4774 req.flags |= cpu_to_le32(
4775 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4776
2e882468
VV
4777 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4778 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4779 u16 event_id = bnxt_async_events_arr[i];
4780
4781 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4782 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4783 continue;
4784 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4785 }
4786 if (bmap && bmap_size) {
4787 for (i = 0; i < bmap_size; i++) {
4788 if (test_bit(i, bmap))
4789 __set_bit(i, async_events_bmap);
4790 }
4791 }
4792 for (i = 0; i < 8; i++)
4793 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4794
4795 if (async_only)
4796 req.enables =
4797 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4798
25e1acd6
MC
4799 mutex_lock(&bp->hwrm_cmd_lock);
4800 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
bdb38602
VV
4801 if (!rc) {
4802 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4803 if (resp->flags &
4804 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4805 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4806 }
25e1acd6
MC
4807 mutex_unlock(&bp->hwrm_cmd_lock);
4808 return rc;
c0c050c5
MC
4809}
4810
be58a0da
JH
4811static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4812{
4813 struct hwrm_func_drv_unrgtr_input req = {0};
4814
bdb38602
VV
4815 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4816 return 0;
4817
be58a0da
JH
4818 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4819 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4820}
4821
c0c050c5
MC
4822static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4823{
4824 u32 rc = 0;
4825 struct hwrm_tunnel_dst_port_free_input req = {0};
4826
4827 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4828 req.tunnel_type = tunnel_type;
4829
4830 switch (tunnel_type) {
4831 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
442a35a5
JK
4832 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4833 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
c0c050c5
MC
4834 break;
4835 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
442a35a5
JK
4836 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4837 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
c0c050c5
MC
4838 break;
4839 default:
4840 break;
4841 }
4842
4843 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4844 if (rc)
4845 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4846 rc);
4847 return rc;
4848}
4849
4850static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4851 u8 tunnel_type)
4852{
4853 u32 rc = 0;
4854 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4855 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4856
4857 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4858
4859 req.tunnel_type = tunnel_type;
4860 req.tunnel_dst_port_val = port;
4861
4862 mutex_lock(&bp->hwrm_cmd_lock);
4863 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4864 if (rc) {
4865 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4866 rc);
4867 goto err_out;
4868 }
4869
57aac71b
CJ
4870 switch (tunnel_type) {
4871 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
442a35a5
JK
4872 bp->vxlan_fw_dst_port_id =
4873 le16_to_cpu(resp->tunnel_dst_port_id);
57aac71b
CJ
4874 break;
4875 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
442a35a5 4876 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
57aac71b
CJ
4877 break;
4878 default:
4879 break;
4880 }
4881
c0c050c5
MC
4882err_out:
4883 mutex_unlock(&bp->hwrm_cmd_lock);
4884 return rc;
4885}
4886
4887static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4888{
4889 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4890 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4891
4892 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
c193554e 4893 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
c0c050c5
MC
4894
4895 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4896 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4897 req.mask = cpu_to_le32(vnic->rx_mask);
4898 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4899}
4900
4901#ifdef CONFIG_RFS_ACCEL
4902static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4903 struct bnxt_ntuple_filter *fltr)
4904{
4905 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4906
4907 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4908 req.ntuple_filter_id = fltr->filter_id;
4909 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4910}
4911
4912#define BNXT_NTP_FLTR_FLAGS \
4913 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4914 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4915 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4916 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4917 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4918 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4919 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4920 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4921 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4922 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4923 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4924 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4925 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
c193554e 4926 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
c0c050c5 4927
61aad724
MC
4928#define BNXT_NTP_TUNNEL_FLTR_FLAG \
4929 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4930
c0c050c5
MC
4931static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4932 struct bnxt_ntuple_filter *fltr)
4933{
c0c050c5 4934 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
5c209fc8 4935 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
c0c050c5 4936 struct flow_keys *keys = &fltr->fkeys;
ac33906c 4937 struct bnxt_vnic_info *vnic;
41136ab3 4938 u32 flags = 0;
5c209fc8 4939 int rc = 0;
c0c050c5
MC
4940
4941 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
a54c4d74 4942 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
c0c050c5 4943
41136ab3
MC
4944 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4945 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4946 req.dst_id = cpu_to_le16(fltr->rxq);
ac33906c
MC
4947 } else {
4948 vnic = &bp->vnic_info[fltr->rxq + 1];
41136ab3 4949 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
ac33906c 4950 }
41136ab3
MC
4951 req.flags = cpu_to_le32(flags);
4952 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
c0c050c5
MC
4953
4954 req.ethertype = htons(ETH_P_IP);
4955 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
c193554e 4956 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
c0c050c5
MC
4957 req.ip_protocol = keys->basic.ip_proto;
4958
dda0e746
MC
4959 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4960 int i;
4961
4962 req.ethertype = htons(ETH_P_IPV6);
4963 req.ip_addr_type =
4964 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4965 *(struct in6_addr *)&req.src_ipaddr[0] =
4966 keys->addrs.v6addrs.src;
4967 *(struct in6_addr *)&req.dst_ipaddr[0] =
4968 keys->addrs.v6addrs.dst;
4969 for (i = 0; i < 4; i++) {
4970 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4971 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4972 }
4973 } else {
4974 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4975 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4976 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4977 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4978 }
61aad724
MC
4979 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4980 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4981 req.tunnel_type =
4982 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4983 }
c0c050c5
MC
4984
4985 req.src_port = keys->ports.src;
4986 req.src_port_mask = cpu_to_be16(0xffff);
4987 req.dst_port = keys->ports.dst;
4988 req.dst_port_mask = cpu_to_be16(0xffff);
4989
c0c050c5
MC
4990 mutex_lock(&bp->hwrm_cmd_lock);
4991 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5c209fc8
VD
4992 if (!rc) {
4993 resp = bnxt_get_hwrm_resp_addr(bp, &req);
c0c050c5 4994 fltr->filter_id = resp->ntuple_filter_id;
5c209fc8 4995 }
c0c050c5
MC
4996 mutex_unlock(&bp->hwrm_cmd_lock);
4997 return rc;
4998}
4999#endif
5000
5001static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5002 u8 *mac_addr)
5003{
5004 u32 rc = 0;
5005 struct hwrm_cfa_l2_filter_alloc_input req = {0};
5006 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5007
5008 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
dc52c6c7
PS
5009 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5010 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5011 req.flags |=
5012 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
c193554e 5013 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
c0c050c5
MC
5014 req.enables =
5015 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
c193554e 5016 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
c0c050c5
MC
5017 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5018 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
5019 req.l2_addr_mask[0] = 0xff;
5020 req.l2_addr_mask[1] = 0xff;
5021 req.l2_addr_mask[2] = 0xff;
5022 req.l2_addr_mask[3] = 0xff;
5023 req.l2_addr_mask[4] = 0xff;
5024 req.l2_addr_mask[5] = 0xff;
5025
5026 mutex_lock(&bp->hwrm_cmd_lock);
5027 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5028 if (!rc)
5029 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5030 resp->l2_filter_id;
5031 mutex_unlock(&bp->hwrm_cmd_lock);
5032 return rc;
5033}
5034
5035static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5036{
5037 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5038 int rc = 0;
5039
5040 /* Any associated ntuple filters will also be cleared by firmware. */
5041 mutex_lock(&bp->hwrm_cmd_lock);
5042 for (i = 0; i < num_of_vnics; i++) {
5043 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5044
5045 for (j = 0; j < vnic->uc_filter_count; j++) {
5046 struct hwrm_cfa_l2_filter_free_input req = {0};
5047
5048 bnxt_hwrm_cmd_hdr_init(bp, &req,
5049 HWRM_CFA_L2_FILTER_FREE, -1, -1);
5050
5051 req.l2_filter_id = vnic->fw_l2_filter_id[j];
5052
5053 rc = _hwrm_send_message(bp, &req, sizeof(req),
5054 HWRM_CMD_TIMEOUT);
5055 }
5056 vnic->uc_filter_count = 0;
5057 }
5058 mutex_unlock(&bp->hwrm_cmd_lock);
5059
5060 return rc;
5061}
5062
5063static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5064{
5065 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
79632e9b 5066 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
c0c050c5
MC
5067 struct hwrm_vnic_tpa_cfg_input req = {0};
5068
3c4fe80b
MC
5069 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5070 return 0;
5071
c0c050c5
MC
5072 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
5073
5074 if (tpa_flags) {
5075 u16 mss = bp->dev->mtu - 40;
5076 u32 nsegs, n, segs = 0, flags;
5077
5078 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5079 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5080 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5081 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5082 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5083 if (tpa_flags & BNXT_FLAG_GRO)
5084 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5085
5086 req.flags = cpu_to_le32(flags);
5087
5088 req.enables =
5089 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
c193554e
MC
5090 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5091 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
c0c050c5
MC
5092
5093 /* The number of segs is in log2 units, and the first packet is
5094 * not included in these units.
5095 */
2839f28b
MC
5096 if (mss <= BNXT_RX_PAGE_SIZE) {
5097 n = BNXT_RX_PAGE_SIZE / mss;
c0c050c5
MC
5098 nsegs = (MAX_SKB_FRAGS - 1) * n;
5099 } else {
2839f28b
MC
5100 n = mss / BNXT_RX_PAGE_SIZE;
5101 if (mss & (BNXT_RX_PAGE_SIZE - 1))
c0c050c5
MC
5102 n++;
5103 nsegs = (MAX_SKB_FRAGS - n) / n;
5104 }
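/* Worked example with assumed values (1500 byte MTU so mss = 1460,
 * 4K BNXT_RX_PAGE_SIZE, MAX_SKB_FRAGS = 17): n = 4096 / 1460 = 2,
 * nsegs = (17 - 1) * 2 = 32, and for non-P5 chips below
 * segs = ilog2(32) = 5.
 */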
5105
79632e9b
MC
5106 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5107 segs = MAX_TPA_SEGS_P5;
5108 max_aggs = bp->max_tpa;
5109 } else {
5110 segs = ilog2(nsegs);
5111 }
c0c050c5 5112 req.max_agg_segs = cpu_to_le16(segs);
79632e9b 5113 req.max_aggs = cpu_to_le16(max_aggs);
c193554e
MC
5114
5115 req.min_agg_len = cpu_to_le32(512);
c0c050c5
MC
5116 }
5117 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5118
5119 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5120}
5121
2c61d211
MC
5122static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5123{
5124 struct bnxt_ring_grp_info *grp_info;
5125
5126 grp_info = &bp->grp_info[ring->grp_idx];
5127 return grp_info->cp_fw_ring_id;
5128}
5129
5130static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5131{
5132 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5133 struct bnxt_napi *bnapi = rxr->bnapi;
5134 struct bnxt_cp_ring_info *cpr;
5135
5136 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5137 return cpr->cp_ring_struct.fw_ring_id;
5138 } else {
5139 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5140 }
5141}
5142
5143static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5144{
5145 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5146 struct bnxt_napi *bnapi = txr->bnapi;
5147 struct bnxt_cp_ring_info *cpr;
5148
5149 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5150 return cpr->cp_ring_struct.fw_ring_id;
5151 } else {
5152 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5153 }
5154}
5155
1667cbf6
MC
5156static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5157{
5158 int entries;
5159
5160 if (bp->flags & BNXT_FLAG_CHIP_P5)
5161 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5162 else
5163 entries = HW_HASH_INDEX_SIZE;
5164
5165 bp->rss_indir_tbl_entries = entries;
5166 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5167 GFP_KERNEL);
5168 if (!bp->rss_indir_tbl)
5169 return -ENOMEM;
5170 return 0;
5171}
5172
5173static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5174{
5175 u16 max_rings, max_entries, pad, i;
5176
5177 if (!bp->rx_nr_rings)
5178 return;
5179
5180 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5181 max_rings = bp->rx_nr_rings - 1;
5182 else
5183 max_rings = bp->rx_nr_rings;
5184
5185 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5186
5187 for (i = 0; i < max_entries; i++)
5188 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5189
5190 pad = bp->rss_indir_tbl_entries - max_entries;
5191 if (pad)
5192 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5193}
5194
bd3191b5
MC
5195static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5196{
5197 u16 i, tbl_size, max_ring = 0;
5198
5199 if (!bp->rss_indir_tbl)
5200 return 0;
5201
5202 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5203 for (i = 0; i < tbl_size; i++)
5204 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5205 return max_ring;
5206}
5207
f9f6a3fb
MC
5208int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5209{
5210 if (bp->flags & BNXT_FLAG_CHIP_P5)
5211 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5212 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5213 return 2;
5214 return 1;
5215}
5216
f33a305d
MC
5217static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5218{
5219 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5220 u16 i, j;
5221
5222 /* Fill the RSS indirection table with ring group ids */
5223 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5224 if (!no_rss)
5225 j = bp->rss_indir_tbl[i];
5226 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5227 }
5228}
5229
5230static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5231 struct bnxt_vnic_info *vnic)
5232{
5233 __le16 *ring_tbl = vnic->rss_table;
5234 struct bnxt_rx_ring_info *rxr;
5235 u16 tbl_size, i;
5236
5237 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5238
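/* On P5 chips each indirection table slot holds a pair of IDs: the
 * firmware ring ID of the RX ring followed by the ID of the completion
 * ring servicing it, as filled in below.
 */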
5239 for (i = 0; i < tbl_size; i++) {
5240 u16 ring_id, j;
5241
5242 j = bp->rss_indir_tbl[i];
5243 rxr = &bp->rx_ring[j];
5244
5245 ring_id = rxr->rx_ring_struct.fw_ring_id;
5246 *ring_tbl++ = cpu_to_le16(ring_id);
5247 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5248 *ring_tbl++ = cpu_to_le16(ring_id);
5249 }
5250}
5251
5252static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5253{
5254 if (bp->flags & BNXT_FLAG_CHIP_P5)
5255 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5256 else
5257 __bnxt_fill_hw_rss_tbl(bp, vnic);
5258}
5259
c0c050c5
MC
5260static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5261{
c0c050c5
MC
5262 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5263 struct hwrm_vnic_rss_cfg_input req = {0};
5264
7b3af4f7
MC
5265 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5266 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
c0c050c5
MC
5267 return 0;
5268
5269 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5270 if (set_rss) {
f33a305d 5271 bnxt_fill_hw_rss_tbl(bp, vnic);
87da7f79 5272 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
50f011b6 5273 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
c0c050c5
MC
5274 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5275 req.hash_key_tbl_addr =
5276 cpu_to_le64(vnic->rss_hash_key_dma_addr);
5277 }
94ce9caa 5278 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
c0c050c5
MC
5279 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5280}
5281
7b3af4f7
MC
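/* Configure RSS for a P5 VNIC: one HWRM_VNIC_RSS_CFG request is sent per
 * RSS context, each pointing at its own slice of the ring table.
 */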
5282static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5283{
5284 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
7b3af4f7 5285 struct hwrm_vnic_rss_cfg_input req = {0};
f33a305d
MC
5286 dma_addr_t ring_tbl_map;
5287 u32 i, nr_ctxs;
7b3af4f7
MC
5288
5289 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5290 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5291 if (!set_rss) {
5292 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5293 return 0;
5294 }
f33a305d 5295 bnxt_fill_hw_rss_tbl(bp, vnic);
7b3af4f7
MC
5296 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5297 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
7b3af4f7 5298 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
f33a305d 5299 ring_tbl_map = vnic->rss_table_dma_addr;
f9f6a3fb 5300 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
f33a305d 5301 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
7b3af4f7
MC
5302 int rc;
5303
f33a305d 5304 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
7b3af4f7
MC
5305 req.ring_table_pair_index = i;
5306 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
7b3af4f7
MC
5307 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5308 if (rc)
d4f1420d 5309 return rc;
7b3af4f7
MC
5310 }
5311 return 0;
5312}
5313
c0c050c5
MC
5314static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5315{
5316 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5317 struct hwrm_vnic_plcmodes_cfg_input req = {0};
5318
5319 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5320 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5321 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5322 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5323 req.enables =
5324 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5325 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5326 /* thresholds not implemented in firmware yet */
5327 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5328 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5329 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5330 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5331}
5332
94ce9caa
PS
5333static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5334 u16 ctx_idx)
c0c050c5
MC
5335{
5336 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5337
5338 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5339 req.rss_cos_lb_ctx_id =
94ce9caa 5340 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
c0c050c5
MC
5341
5342 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
94ce9caa 5343 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
c0c050c5
MC
5344}
5345
5346static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5347{
94ce9caa 5348 int i, j;
c0c050c5
MC
5349
5350 for (i = 0; i < bp->nr_vnics; i++) {
5351 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5352
94ce9caa
PS
5353 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5354 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5355 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5356 }
c0c050c5
MC
5357 }
5358 bp->rsscos_nr_ctxs = 0;
5359}
5360
94ce9caa 5361static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
c0c050c5
MC
5362{
5363 int rc;
5364 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5365 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5366 bp->hwrm_cmd_resp_addr;
5367
5368 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5369 -1);
5370
5371 mutex_lock(&bp->hwrm_cmd_lock);
5372 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5373 if (!rc)
94ce9caa 5374 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
c0c050c5
MC
5375 le16_to_cpu(resp->rss_cos_lb_ctx_id);
5376 mutex_unlock(&bp->hwrm_cmd_lock);
5377
5378 return rc;
5379}
5380
abe93ad2
MC
5381static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5382{
5383 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5384 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5385 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5386}
5387
a588e458 5388int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
c0c050c5 5389{
b81a90d3 5390 unsigned int ring = 0, grp_idx;
c0c050c5
MC
5391 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5392 struct hwrm_vnic_cfg_input req = {0};
cf6645f8 5393 u16 def_vlan = 0;
c0c050c5
MC
5394
5395 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
dc52c6c7 5396
7b3af4f7
MC
5397 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5398 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5399
5400 req.default_rx_ring_id =
5401 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5402 req.default_cmpl_ring_id =
5403 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5404 req.enables =
5405 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5406 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5407 goto vnic_mru;
5408 }
dc52c6c7 5409 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
c0c050c5 5410	/* Only RSS supported for now. TBD: COS & LB */
dc52c6c7
PS
5411 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5412 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5413 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5414 VNIC_CFG_REQ_ENABLES_MRU);
ae10ae74
MC
5415 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5416 req.rss_rule =
5417 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5418 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5419 VNIC_CFG_REQ_ENABLES_MRU);
5420 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
dc52c6c7
PS
5421 } else {
5422 req.rss_rule = cpu_to_le16(0xffff);
5423 }
94ce9caa 5424
dc52c6c7
PS
5425 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5426 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
94ce9caa
PS
5427 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5428 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5429 } else {
5430 req.cos_rule = cpu_to_le16(0xffff);
5431 }
5432
c0c050c5 5433 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
b81a90d3 5434 ring = 0;
c0c050c5 5435 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
b81a90d3 5436 ring = vnic_id - 1;
76595193
PS
5437 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5438 ring = bp->rx_nr_rings - 1;
c0c050c5 5439
b81a90d3 5440 grp_idx = bp->rx_ring[ring].bnapi->index;
c0c050c5 5441 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
c0c050c5 5442 req.lb_rule = cpu_to_le16(0xffff);
7b3af4f7 5443vnic_mru:
d0b82c54 5444 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
c0c050c5 5445
7b3af4f7 5446 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
cf6645f8
MC
5447#ifdef CONFIG_BNXT_SRIOV
5448 if (BNXT_VF(bp))
5449 def_vlan = bp->vf.vlan;
5450#endif
5451 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
c0c050c5 5452 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
a588e458 5453 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
abe93ad2 5454 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
c0c050c5
MC
5455
5456 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5457}
5458
3d061591 5459static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
c0c050c5 5460{
c0c050c5
MC
5461 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5462 struct hwrm_vnic_free_input req = {0};
5463
5464 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5465 req.vnic_id =
5466 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5467
3d061591 5468 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
c0c050c5
MC
5469 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5470 }
c0c050c5
MC
5471}
5472
5473static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5474{
5475 u16 i;
5476
5477 for (i = 0; i < bp->nr_vnics; i++)
5478 bnxt_hwrm_vnic_free_one(bp, i);
5479}
5480
b81a90d3
MC
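/* Allocate a VNIC in firmware and record the ring group ids it will use.
 * P5 chips do not use ring groups, so the mapping step is skipped there.
 */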
5481static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5482 unsigned int start_rx_ring_idx,
5483 unsigned int nr_rings)
c0c050c5 5484{
b81a90d3
MC
5485 int rc = 0;
5486 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
c0c050c5
MC
5487 struct hwrm_vnic_alloc_input req = {0};
5488 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
44c6f72a
MC
5489 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5490
5491 if (bp->flags & BNXT_FLAG_CHIP_P5)
5492 goto vnic_no_ring_grps;
c0c050c5
MC
5493
5494 /* map ring groups to this vnic */
b81a90d3
MC
5495 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5496 grp_idx = bp->rx_ring[i].bnapi->index;
5497 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
c0c050c5 5498 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
b81a90d3 5499 j, nr_rings);
c0c050c5
MC
5500 break;
5501 }
44c6f72a 5502 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
c0c050c5
MC
5503 }
5504
44c6f72a
MC
5505vnic_no_ring_grps:
5506 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5507 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
c0c050c5
MC
5508 if (vnic_id == 0)
5509 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5510
5511 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5512
5513 mutex_lock(&bp->hwrm_cmd_lock);
5514 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5515 if (!rc)
44c6f72a 5516 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
c0c050c5
MC
5517 mutex_unlock(&bp->hwrm_cmd_lock);
5518 return rc;
5519}
5520
8fdefd63
MC
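/* Query VNIC capabilities and cache the results: new RSS capability, RoCE
 * mirroring, VLAN strip support, TPA v2 limits and the per-ring stats size.
 */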
5521static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5522{
5523 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5524 struct hwrm_vnic_qcaps_input req = {0};
5525 int rc;
5526
fbbdbc64 5527 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
ba642ab7 5528 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
8fdefd63
MC
5529 if (bp->hwrm_spec_code < 0x10600)
5530 return 0;
5531
5532 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5533 mutex_lock(&bp->hwrm_cmd_lock);
5534 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5535 if (!rc) {
abe93ad2
MC
5536 u32 flags = le32_to_cpu(resp->flags);
5537
41e8d798
MC
5538 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5539 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
8fdefd63 5540 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
abe93ad2
MC
5541 if (flags &
5542 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5543 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
1da63ddd
EP
5544
5545 /* Older P5 fw before EXT_HW_STATS support did not set
5546 * VLAN_STRIP_CAP properly.
5547 */
5548 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
9d6b648c 5549 (BNXT_CHIP_P5_THOR(bp) &&
1da63ddd
EP
5550 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5551 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
79632e9b 5552 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
9d6b648c
MC
5553 if (bp->max_tpa_v2) {
5554 if (BNXT_CHIP_P5_THOR(bp))
5555 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5556 else
5557 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5558 }
8fdefd63
MC
5559 }
5560 mutex_unlock(&bp->hwrm_cmd_lock);
5561 return rc;
5562}
5563
c0c050c5
MC
5564static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5565{
5566 u16 i;
5567 u32 rc = 0;
5568
44c6f72a
MC
5569 if (bp->flags & BNXT_FLAG_CHIP_P5)
5570 return 0;
5571
c0c050c5
MC
5572 mutex_lock(&bp->hwrm_cmd_lock);
5573 for (i = 0; i < bp->rx_nr_rings; i++) {
5574 struct hwrm_ring_grp_alloc_input req = {0};
5575 struct hwrm_ring_grp_alloc_output *resp =
5576 bp->hwrm_cmd_resp_addr;
b81a90d3 5577 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
c0c050c5
MC
5578
5579 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5580
b81a90d3
MC
5581 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5582 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5583 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5584 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
c0c050c5
MC
5585
5586 rc = _hwrm_send_message(bp, &req, sizeof(req),
5587 HWRM_CMD_TIMEOUT);
5588 if (rc)
5589 break;
5590
b81a90d3
MC
5591 bp->grp_info[grp_idx].fw_grp_id =
5592 le32_to_cpu(resp->ring_group_id);
c0c050c5
MC
5593 }
5594 mutex_unlock(&bp->hwrm_cmd_lock);
5595 return rc;
5596}
5597
3d061591 5598static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
c0c050c5
MC
5599{
5600 u16 i;
c0c050c5
MC
5601 struct hwrm_ring_grp_free_input req = {0};
5602
44c6f72a 5603 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
3d061591 5604 return;
c0c050c5
MC
5605
5606 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5607
5608 mutex_lock(&bp->hwrm_cmd_lock);
5609 for (i = 0; i < bp->cp_nr_rings; i++) {
5610 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5611 continue;
5612 req.ring_group_id =
5613 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5614
3d061591 5615 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
c0c050c5
MC
5616 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5617 }
5618 mutex_unlock(&bp->hwrm_cmd_lock);
c0c050c5
MC
5619}
5620
5621static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5622 struct bnxt_ring_struct *ring,
9899bb59 5623 u32 ring_type, u32 map_index)
c0c050c5
MC
5624{
5625 int rc = 0, err = 0;
5626 struct hwrm_ring_alloc_input req = {0};
5627 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6fe19886 5628 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
9899bb59 5629 struct bnxt_ring_grp_info *grp_info;
c0c050c5
MC
5630 u16 ring_id;
5631
5632 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5633
5634 req.enables = 0;
6fe19886
MC
5635 if (rmem->nr_pages > 1) {
5636 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
c0c050c5
MC
5637 /* Page size is in log2 units */
5638 req.page_size = BNXT_PAGE_SHIFT;
5639 req.page_tbl_depth = 1;
5640 } else {
6fe19886 5641 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
c0c050c5
MC
5642 }
5643 req.fbo = 0;
5644 /* Association of ring index with doorbell index and MSIX number */
5645 req.logical_id = cpu_to_le16(map_index);
5646
5647 switch (ring_type) {
2c61d211
MC
5648 case HWRM_RING_ALLOC_TX: {
5649 struct bnxt_tx_ring_info *txr;
5650
5651 txr = container_of(ring, struct bnxt_tx_ring_info,
5652 tx_ring_struct);
c0c050c5
MC
5653 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5654 /* Association of transmit ring with completion ring */
9899bb59 5655 grp_info = &bp->grp_info[ring->grp_idx];
2c61d211 5656 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
c0c050c5 5657 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
9899bb59 5658 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
c0c050c5
MC
5659 req.queue_id = cpu_to_le16(ring->queue_id);
5660 break;
2c61d211 5661 }
c0c050c5
MC
5662 case HWRM_RING_ALLOC_RX:
5663 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5664 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
23aefdd7
MC
5665 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5666 u16 flags = 0;
5667
5668 /* Association of rx ring with stats context */
5669 grp_info = &bp->grp_info[ring->grp_idx];
5670 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5671 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5672 req.enables |= cpu_to_le32(
5673 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5674 if (NET_IP_ALIGN == 2)
5675 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5676 req.flags = cpu_to_le16(flags);
5677 }
c0c050c5
MC
5678 break;
5679 case HWRM_RING_ALLOC_AGG:
23aefdd7
MC
5680 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5681 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5682 /* Association of agg ring with rx ring */
5683 grp_info = &bp->grp_info[ring->grp_idx];
5684 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5685 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5686 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5687 req.enables |= cpu_to_le32(
5688 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5689 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5690 } else {
5691 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5692 }
c0c050c5
MC
5693 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5694 break;
5695 case HWRM_RING_ALLOC_CMPL:
bac9a7e0 5696 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
c0c050c5 5697 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
23aefdd7
MC
5698 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5699 /* Association of cp ring with nq */
5700 grp_info = &bp->grp_info[map_index];
5701 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5702 req.cq_handle = cpu_to_le64(ring->handle);
5703 req.enables |= cpu_to_le32(
5704 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5705 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5706 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5707 }
5708 break;
5709 case HWRM_RING_ALLOC_NQ:
5710 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5711 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
c0c050c5
MC
5712 if (bp->flags & BNXT_FLAG_USING_MSIX)
5713 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5714 break;
5715 default:
5716 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5717 ring_type);
5718 return -1;
5719 }
5720
5721 mutex_lock(&bp->hwrm_cmd_lock);
5722 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5723 err = le16_to_cpu(resp->error_code);
5724 ring_id = le16_to_cpu(resp->ring_id);
5725 mutex_unlock(&bp->hwrm_cmd_lock);
5726
5727 if (rc || err) {
2727c888
MC
5728 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5729 ring_type, rc, err);
5730 return -EIO;
c0c050c5
MC
5731 }
5732 ring->fw_ring_id = ring_id;
5733 return rc;
5734}
5735
486b5c22
MC
5736static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5737{
5738 int rc;
5739
5740 if (BNXT_PF(bp)) {
5741 struct hwrm_func_cfg_input req = {0};
5742
5743 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5744 req.fid = cpu_to_le16(0xffff);
5745 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5746 req.async_event_cr = cpu_to_le16(idx);
5747 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5748 } else {
5749 struct hwrm_func_vf_cfg_input req = {0};
5750
5751 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5752 req.enables =
5753 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5754 req.async_event_cr = cpu_to_le16(idx);
5755 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5756 }
5757 return rc;
5758}
5759
697197e5
MC
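/* Set up the doorbell address and key for a newly allocated ring. P5 chips
 * use 64-bit doorbell keys at a fixed PF/VF offset; legacy chips use 32-bit
 * keys at map_idx * 0x80.
 */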
5760static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5761 u32 map_idx, u32 xid)
5762{
5763 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5764 if (BNXT_PF(bp))
ebdf73dc 5765 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
697197e5 5766 else
ebdf73dc 5767 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
697197e5
MC
5768 switch (ring_type) {
5769 case HWRM_RING_ALLOC_TX:
5770 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5771 break;
5772 case HWRM_RING_ALLOC_RX:
5773 case HWRM_RING_ALLOC_AGG:
5774 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5775 break;
5776 case HWRM_RING_ALLOC_CMPL:
5777 db->db_key64 = DBR_PATH_L2;
5778 break;
5779 case HWRM_RING_ALLOC_NQ:
5780 db->db_key64 = DBR_PATH_L2;
5781 break;
5782 }
5783 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5784 } else {
5785 db->doorbell = bp->bar1 + map_idx * 0x80;
5786 switch (ring_type) {
5787 case HWRM_RING_ALLOC_TX:
5788 db->db_key32 = DB_KEY_TX;
5789 break;
5790 case HWRM_RING_ALLOC_RX:
5791 case HWRM_RING_ALLOC_AGG:
5792 db->db_key32 = DB_KEY_RX;
5793 break;
5794 case HWRM_RING_ALLOC_CMPL:
5795 db->db_key32 = DB_KEY_CP;
5796 break;
5797 }
5798 }
5799}
5800
c0c050c5
MC
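/* Allocate all firmware rings: NQ/completion rings first (the first one is
 * also set as the async event ring), then TX, RX and aggregation rings,
 * programming each doorbell as the ring is created.
 */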
5801static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5802{
e8f267b0 5803 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
c0c050c5 5804 int i, rc = 0;
697197e5 5805 u32 type;
c0c050c5 5806
23aefdd7
MC
5807 if (bp->flags & BNXT_FLAG_CHIP_P5)
5808 type = HWRM_RING_ALLOC_NQ;
5809 else
5810 type = HWRM_RING_ALLOC_CMPL;
edd0c2cc
MC
5811 for (i = 0; i < bp->cp_nr_rings; i++) {
5812 struct bnxt_napi *bnapi = bp->bnapi[i];
5813 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5814 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9899bb59 5815 u32 map_idx = ring->map_idx;
5e66e35a 5816 unsigned int vector;
c0c050c5 5817
5e66e35a
MC
5818 vector = bp->irq_tbl[map_idx].vector;
5819 disable_irq_nosync(vector);
697197e5 5820 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5e66e35a
MC
5821 if (rc) {
5822 enable_irq(vector);
edd0c2cc 5823 goto err_out;
5e66e35a 5824 }
697197e5
MC
5825 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5826 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5e66e35a 5827 enable_irq(vector);
edd0c2cc 5828 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
486b5c22
MC
5829
5830 if (!i) {
5831 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5832 if (rc)
5833 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5834 }
c0c050c5
MC
5835 }
5836
697197e5 5837 type = HWRM_RING_ALLOC_TX;
edd0c2cc 5838 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5839 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3e08b184
MC
5840 struct bnxt_ring_struct *ring;
5841 u32 map_idx;
c0c050c5 5842
3e08b184
MC
5843 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5844 struct bnxt_napi *bnapi = txr->bnapi;
5845 struct bnxt_cp_ring_info *cpr, *cpr2;
5846 u32 type2 = HWRM_RING_ALLOC_CMPL;
5847
5848 cpr = &bnapi->cp_ring;
5849 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5850 ring = &cpr2->cp_ring_struct;
5851 ring->handle = BNXT_TX_HDL;
5852 map_idx = bnapi->index;
5853 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5854 if (rc)
5855 goto err_out;
5856 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5857 ring->fw_ring_id);
5858 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5859 }
5860 ring = &txr->tx_ring_struct;
5861 map_idx = i;
697197e5 5862 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5863 if (rc)
5864 goto err_out;
697197e5 5865 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
c0c050c5
MC
5866 }
5867
697197e5 5868 type = HWRM_RING_ALLOC_RX;
edd0c2cc 5869 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5870 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5871 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3e08b184
MC
5872 struct bnxt_napi *bnapi = rxr->bnapi;
5873 u32 map_idx = bnapi->index;
c0c050c5 5874
697197e5 5875 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5876 if (rc)
5877 goto err_out;
697197e5 5878 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
e8f267b0
MC
5879 /* If we have agg rings, post agg buffers first. */
5880 if (!agg_rings)
5881 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5882 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3e08b184
MC
5883 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5884 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5885 u32 type2 = HWRM_RING_ALLOC_CMPL;
5886 struct bnxt_cp_ring_info *cpr2;
5887
5888 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5889 ring = &cpr2->cp_ring_struct;
5890 ring->handle = BNXT_RX_HDL;
5891 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5892 if (rc)
5893 goto err_out;
5894 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5895 ring->fw_ring_id);
5896 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5897 }
c0c050c5
MC
5898 }
5899
e8f267b0 5900 if (agg_rings) {
697197e5 5901 type = HWRM_RING_ALLOC_AGG;
c0c050c5 5902 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5903 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
5904 struct bnxt_ring_struct *ring =
5905 &rxr->rx_agg_ring_struct;
9899bb59 5906 u32 grp_idx = ring->grp_idx;
b81a90d3 5907 u32 map_idx = grp_idx + bp->rx_nr_rings;
c0c050c5 5908
697197e5 5909 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
c0c050c5
MC
5910 if (rc)
5911 goto err_out;
5912
697197e5
MC
5913 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5914 ring->fw_ring_id);
5915 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 5916 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5917 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
5918 }
5919 }
5920err_out:
5921 return rc;
5922}
5923
5924static int hwrm_ring_free_send_msg(struct bnxt *bp,
5925 struct bnxt_ring_struct *ring,
5926 u32 ring_type, int cmpl_ring_id)
5927{
5928 int rc;
5929 struct hwrm_ring_free_input req = {0};
5930 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5931 u16 error_code;
5932
b340dc68 5933 if (BNXT_NO_FW_ACCESS(bp))
b4fff207
MC
5934 return 0;
5935
74608fc9 5936 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
c0c050c5
MC
5937 req.ring_type = ring_type;
5938 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5939
5940 mutex_lock(&bp->hwrm_cmd_lock);
5941 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5942 error_code = le16_to_cpu(resp->error_code);
5943 mutex_unlock(&bp->hwrm_cmd_lock);
5944
5945 if (rc || error_code) {
2727c888
MC
5946 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5947 ring_type, rc, error_code);
5948 return -EIO;
c0c050c5
MC
5949 }
5950 return 0;
5951}
5952
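/* Free all firmware rings: TX, RX and aggregation rings first, then the
 * completion/NQ rings after interrupts have been disabled.
 */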
edd0c2cc 5953static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 5954{
23aefdd7 5955 u32 type;
edd0c2cc 5956 int i;
c0c050c5
MC
5957
5958 if (!bp->bnapi)
edd0c2cc 5959 return;
c0c050c5 5960
edd0c2cc 5961 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5962 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 5963 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
edd0c2cc
MC
5964
5965 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5966 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5967
edd0c2cc
MC
5968 hwrm_ring_free_send_msg(bp, ring,
5969 RING_FREE_REQ_RING_TYPE_TX,
5970 close_path ? cmpl_ring_id :
5971 INVALID_HW_RING_ID);
5972 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5973 }
5974 }
5975
edd0c2cc 5976 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5977 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5978 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3 5979 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5980
5981 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5982 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5983
edd0c2cc
MC
5984 hwrm_ring_free_send_msg(bp, ring,
5985 RING_FREE_REQ_RING_TYPE_RX,
5986 close_path ? cmpl_ring_id :
5987 INVALID_HW_RING_ID);
5988 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5989 bp->grp_info[grp_idx].rx_fw_ring_id =
5990 INVALID_HW_RING_ID;
c0c050c5
MC
5991 }
5992 }
5993
23aefdd7
MC
5994 if (bp->flags & BNXT_FLAG_CHIP_P5)
5995 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5996 else
5997 type = RING_FREE_REQ_RING_TYPE_RX;
edd0c2cc 5998 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5999 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 6000 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
b81a90d3 6001 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
6002
6003 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
6004 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6005
23aefdd7 6006 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
6007 close_path ? cmpl_ring_id :
6008 INVALID_HW_RING_ID);
6009 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
6010 bp->grp_info[grp_idx].agg_fw_ring_id =
6011 INVALID_HW_RING_ID;
c0c050c5
MC
6012 }
6013 }
6014
9d8bc097
MC
 6015	/* The completion rings are about to be freed. After that the
 6016	 * IRQ doorbell will no longer work, so we need to disable
 6017	 * the IRQ here.
 6018	 */
6019 bnxt_disable_int_sync(bp);
6020
23aefdd7
MC
6021 if (bp->flags & BNXT_FLAG_CHIP_P5)
6022 type = RING_FREE_REQ_RING_TYPE_NQ;
6023 else
6024 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
edd0c2cc
MC
6025 for (i = 0; i < bp->cp_nr_rings; i++) {
6026 struct bnxt_napi *bnapi = bp->bnapi[i];
6027 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3e08b184
MC
6028 struct bnxt_ring_struct *ring;
6029 int j;
edd0c2cc 6030
3e08b184
MC
6031 for (j = 0; j < 2; j++) {
6032 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
6033
6034 if (cpr2) {
6035 ring = &cpr2->cp_ring_struct;
6036 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6037 continue;
6038 hwrm_ring_free_send_msg(bp, ring,
6039 RING_FREE_REQ_RING_TYPE_L2_CMPL,
6040 INVALID_HW_RING_ID);
6041 ring->fw_ring_id = INVALID_HW_RING_ID;
6042 }
6043 }
6044 ring = &cpr->cp_ring_struct;
edd0c2cc 6045 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
23aefdd7 6046 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
6047 INVALID_HW_RING_ID);
6048 ring->fw_ring_id = INVALID_HW_RING_ID;
6049 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
6050 }
6051 }
c0c050c5
MC
6052}
6053
41e8d798
MC
6054static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6055 bool shared);
6056
674f50a5
MC
6057static int bnxt_hwrm_get_rings(struct bnxt *bp)
6058{
6059 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6060 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6061 struct hwrm_func_qcfg_input req = {0};
6062 int rc;
6063
6064 if (bp->hwrm_spec_code < 0x10601)
6065 return 0;
6066
6067 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6068 req.fid = cpu_to_le16(0xffff);
6069 mutex_lock(&bp->hwrm_cmd_lock);
6070 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6071 if (rc) {
6072 mutex_unlock(&bp->hwrm_cmd_lock);
d4f1420d 6073 return rc;
674f50a5
MC
6074 }
6075
6076 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
f1ca94de 6077 if (BNXT_NEW_RM(bp)) {
674f50a5
MC
6078 u16 cp, stats;
6079
6080 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6081 hw_resc->resv_hw_ring_grps =
6082 le32_to_cpu(resp->alloc_hw_ring_grps);
6083 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6084 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6085 stats = le16_to_cpu(resp->alloc_stat_ctx);
75720e63 6086 hw_resc->resv_irqs = cp;
41e8d798
MC
6087 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6088 int rx = hw_resc->resv_rx_rings;
6089 int tx = hw_resc->resv_tx_rings;
6090
6091 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6092 rx >>= 1;
6093 if (cp < (rx + tx)) {
6094 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6095 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6096 rx <<= 1;
6097 hw_resc->resv_rx_rings = rx;
6098 hw_resc->resv_tx_rings = tx;
6099 }
75720e63 6100 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
41e8d798
MC
6101 hw_resc->resv_hw_ring_grps = rx;
6102 }
674f50a5 6103 hw_resc->resv_cp_rings = cp;
780baad4 6104 hw_resc->resv_stat_ctxs = stats;
674f50a5
MC
6105 }
6106 mutex_unlock(&bp->hwrm_cmd_lock);
6107 return 0;
6108}
6109
391be5c2
MC
6110/* Caller must hold bp->hwrm_cmd_lock */
6111int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6112{
6113 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6114 struct hwrm_func_qcfg_input req = {0};
6115 int rc;
6116
6117 if (bp->hwrm_spec_code < 0x10601)
6118 return 0;
6119
6120 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6121 req.fid = cpu_to_le16(fid);
6122 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6123 if (!rc)
6124 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6125
6126 return rc;
6127}
6128
41e8d798
MC
6129static bool bnxt_rfs_supported(struct bnxt *bp);
6130
4ed50ef4
MC
6131static void
6132__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
6133 int tx_rings, int rx_rings, int ring_grps,
780baad4 6134 int cp_rings, int stats, int vnics)
391be5c2 6135{
674f50a5 6136 u32 enables = 0;
391be5c2 6137
4ed50ef4
MC
6138 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
6139 req->fid = cpu_to_le16(0xffff);
674f50a5 6140 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4ed50ef4 6141 req->num_tx_rings = cpu_to_le16(tx_rings);
f1ca94de 6142 if (BNXT_NEW_RM(bp)) {
674f50a5 6143 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
3f93cd3f 6144 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
6145 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6146 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6147 enables |= tx_rings + ring_grps ?
3f93cd3f 6148 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6149 enables |= rx_rings ?
6150 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6151 } else {
6152 enables |= cp_rings ?
3f93cd3f 6153 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6154 enables |= ring_grps ?
6155 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6156 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6157 }
dbe80d44 6158 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
674f50a5 6159
4ed50ef4 6160 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
6161 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6162 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6163 req->num_msix = cpu_to_le16(cp_rings);
6164 req->num_rsscos_ctxs =
6165 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6166 } else {
6167 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6168 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6169 req->num_rsscos_ctxs = cpu_to_le16(1);
6170 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6171 bnxt_rfs_supported(bp))
6172 req->num_rsscos_ctxs =
6173 cpu_to_le16(ring_grps + 1);
6174 }
780baad4 6175 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4 6176 req->num_vnics = cpu_to_le16(vnics);
674f50a5 6177 }
4ed50ef4
MC
6178 req->enables = cpu_to_le32(enables);
6179}
6180
6181static void
6182__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
6183 struct hwrm_func_vf_cfg_input *req, int tx_rings,
6184 int rx_rings, int ring_grps, int cp_rings,
780baad4 6185 int stats, int vnics)
4ed50ef4
MC
6186{
6187 u32 enables = 0;
6188
6189 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
6190 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
41e8d798
MC
6191 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6192 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
3f93cd3f 6193 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
6194 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6195 enables |= tx_rings + ring_grps ?
3f93cd3f 6196 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6197 } else {
6198 enables |= cp_rings ?
3f93cd3f 6199 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6200 enables |= ring_grps ?
6201 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6202 }
4ed50ef4 6203 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
41e8d798 6204 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
4ed50ef4 6205
41e8d798 6206 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
4ed50ef4
MC
6207 req->num_tx_rings = cpu_to_le16(tx_rings);
6208 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
6209 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6210 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6211 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6212 } else {
6213 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6214 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6215 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6216 }
780baad4 6217 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4
MC
6218 req->num_vnics = cpu_to_le16(vnics);
6219
6220 req->enables = cpu_to_le32(enables);
6221}
6222
6223static int
6224bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 6225 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4
MC
6226{
6227 struct hwrm_func_cfg_input req = {0};
6228 int rc;
6229
6230 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 6231 cp_rings, stats, vnics);
4ed50ef4 6232 if (!req.enables)
391be5c2
MC
6233 return 0;
6234
674f50a5
MC
6235 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6236 if (rc)
d4f1420d 6237 return rc;
674f50a5
MC
6238
6239 if (bp->hwrm_spec_code < 0x10601)
6240 bp->hw_resc.resv_tx_rings = tx_rings;
6241
9f90445c 6242 return bnxt_hwrm_get_rings(bp);
674f50a5
MC
6243}
6244
6245static int
6246bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 6247 int ring_grps, int cp_rings, int stats, int vnics)
674f50a5
MC
6248{
6249 struct hwrm_func_vf_cfg_input req = {0};
674f50a5
MC
6250 int rc;
6251
f1ca94de 6252 if (!BNXT_NEW_RM(bp)) {
674f50a5 6253 bp->hw_resc.resv_tx_rings = tx_rings;
391be5c2 6254 return 0;
674f50a5 6255 }
391be5c2 6256
4ed50ef4 6257 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 6258 cp_rings, stats, vnics);
391be5c2 6259 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
674f50a5 6260 if (rc)
d4f1420d 6261 return rc;
674f50a5 6262
9f90445c 6263 return bnxt_hwrm_get_rings(bp);
674f50a5
MC
6264}
6265
6266static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
780baad4 6267 int cp, int stat, int vnic)
674f50a5
MC
6268{
6269 if (BNXT_PF(bp))
780baad4
VV
6270 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6271 vnic);
674f50a5 6272 else
780baad4
VV
6273 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6274 vnic);
674f50a5
MC
6275}
6276
b16b6891 6277int bnxt_nq_rings_in_use(struct bnxt *bp)
08654eb2
MC
6278{
6279 int cp = bp->cp_nr_rings;
6280 int ulp_msix, ulp_base;
6281
6282 ulp_msix = bnxt_get_ulp_msix_num(bp);
6283 if (ulp_msix) {
6284 ulp_base = bnxt_get_ulp_msix_base(bp);
6285 cp += ulp_msix;
6286 if ((ulp_base + ulp_msix) > cp)
6287 cp = ulp_base + ulp_msix;
6288 }
6289 return cp;
6290}
6291
c0b8cda0
MC
6292static int bnxt_cp_rings_in_use(struct bnxt *bp)
6293{
6294 int cp;
6295
6296 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6297 return bnxt_nq_rings_in_use(bp);
6298
6299 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6300 return cp;
6301}
6302
780baad4
VV
6303static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6304{
d77b1ad8
MC
6305 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6306 int cp = bp->cp_nr_rings;
6307
6308 if (!ulp_stat)
6309 return cp;
6310
6311 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6312 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6313
6314 return cp + ulp_stat;
780baad4
VV
6315}
6316
b43b9f53
MC
 6317/* Check if a default RSS map needs to be set up. This function is only
6318 * used on older firmware that does not require reserving RX rings.
6319 */
6320static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6321{
6322 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6323
6324 /* The RSS map is valid for RX rings set to resv_rx_rings */
6325 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6326 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6327 if (!netif_is_rxfh_configured(bp->dev))
6328 bnxt_set_dflt_rss_indir_tbl(bp);
6329 }
6330}
6331
4e41dc5d
MC
6332static bool bnxt_need_reserve_rings(struct bnxt *bp)
6333{
6334 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
fbcfc8e4 6335 int cp = bnxt_cp_rings_in_use(bp);
c0b8cda0 6336 int nq = bnxt_nq_rings_in_use(bp);
780baad4 6337 int rx = bp->rx_nr_rings, stat;
4e41dc5d
MC
6338 int vnic = 1, grp = rx;
6339
b43b9f53
MC
6340 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6341 bp->hwrm_spec_code >= 0x10601)
4e41dc5d
MC
6342 return true;
6343
b43b9f53
MC
6344 /* Old firmware does not need RX ring reservations but we still
 6345	 * need to set up a default RSS map when needed. With new firmware
6346 * we go through RX ring reservations first and then set up the
6347 * RSS map for the successfully reserved RX rings when needed.
6348 */
6349 if (!BNXT_NEW_RM(bp)) {
6350 bnxt_check_rss_tbl_no_rmgr(bp);
6351 return false;
6352 }
41e8d798 6353 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
4e41dc5d
MC
6354 vnic = rx + 1;
6355 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6356 rx <<= 1;
780baad4 6357 stat = bnxt_get_func_stat_ctxs(bp);
b43b9f53
MC
6358 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6359 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6360 (hw_resc->resv_hw_ring_grps != grp &&
6361 !(bp->flags & BNXT_FLAG_CHIP_P5)))
4e41dc5d 6362 return true;
01989c6b
MC
6363 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6364 hw_resc->resv_irqs != nq)
6365 return true;
4e41dc5d
MC
6366 return false;
6367}
6368
674f50a5
MC
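/* Reserve rings with firmware and trim the driver's ring counts to what was
 * actually granted, disabling aggregation rings and LRO if not enough RX
 * rings were granted.
 */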
6369static int __bnxt_reserve_rings(struct bnxt *bp)
6370{
6371 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
c0b8cda0 6372 int cp = bnxt_nq_rings_in_use(bp);
674f50a5
MC
6373 int tx = bp->tx_nr_rings;
6374 int rx = bp->rx_nr_rings;
674f50a5 6375 int grp, rx_rings, rc;
780baad4 6376 int vnic = 1, stat;
674f50a5 6377 bool sh = false;
674f50a5 6378
4e41dc5d 6379 if (!bnxt_need_reserve_rings(bp))
674f50a5
MC
6380 return 0;
6381
6382 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6383 sh = true;
41e8d798 6384 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
674f50a5
MC
6385 vnic = rx + 1;
6386 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6387 rx <<= 1;
674f50a5 6388 grp = bp->rx_nr_rings;
780baad4 6389 stat = bnxt_get_func_stat_ctxs(bp);
674f50a5 6390
780baad4 6391 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
391be5c2
MC
6392 if (rc)
6393 return rc;
6394
674f50a5 6395 tx = hw_resc->resv_tx_rings;
f1ca94de 6396 if (BNXT_NEW_RM(bp)) {
674f50a5 6397 rx = hw_resc->resv_rx_rings;
c0b8cda0 6398 cp = hw_resc->resv_irqs;
674f50a5
MC
6399 grp = hw_resc->resv_hw_ring_grps;
6400 vnic = hw_resc->resv_vnics;
780baad4 6401 stat = hw_resc->resv_stat_ctxs;
674f50a5
MC
6402 }
6403
6404 rx_rings = rx;
6405 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6406 if (rx >= 2) {
6407 rx_rings = rx >> 1;
6408 } else {
6409 if (netif_running(bp->dev))
6410 return -ENOMEM;
6411
6412 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6413 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6414 bp->dev->hw_features &= ~NETIF_F_LRO;
6415 bp->dev->features &= ~NETIF_F_LRO;
6416 bnxt_set_ring_params(bp);
6417 }
6418 }
6419 rx_rings = min_t(int, rx_rings, grp);
780baad4
VV
6420 cp = min_t(int, cp, bp->cp_nr_rings);
6421 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6422 stat -= bnxt_get_ulp_stat_ctxs(bp);
6423 cp = min_t(int, cp, stat);
674f50a5
MC
6424 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6425 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6426 rx = rx_rings << 1;
6427 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6428 bp->tx_nr_rings = tx;
bd3191b5
MC
6429
6430 /* If we cannot reserve all the RX rings, reset the RSS map only
6431 * if absolutely necessary
6432 */
6433 if (rx_rings != bp->rx_nr_rings) {
6434 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6435 rx_rings, bp->rx_nr_rings);
6436 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6437 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6438 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6439 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6440 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6441 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6442 }
6443 }
674f50a5
MC
6444 bp->rx_nr_rings = rx_rings;
6445 bp->cp_nr_rings = cp;
6446
780baad4 6447 if (!tx || !rx || !cp || !grp || !vnic || !stat)
674f50a5
MC
6448 return -ENOMEM;
6449
5fa65524
EP
6450 if (!netif_is_rxfh_configured(bp->dev))
6451 bnxt_set_dflt_rss_indir_tbl(bp);
6452
391be5c2
MC
6453 return rc;
6454}
6455
8f23d638 6456static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6457 int ring_grps, int cp_rings, int stats,
6458 int vnics)
98fdbe73 6459{
8f23d638 6460 struct hwrm_func_vf_cfg_input req = {0};
6fc2ffdf 6461 u32 flags;
98fdbe73 6462
f1ca94de 6463 if (!BNXT_NEW_RM(bp))
98fdbe73
MC
6464 return 0;
6465
6fc2ffdf 6466 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 6467 cp_rings, stats, vnics);
8f23d638
MC
6468 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6469 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6470 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638 6471 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
41e8d798
MC
6472 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6473 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6474 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6475 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8f23d638
MC
6476
6477 req.flags = cpu_to_le32(flags);
9f90445c
VV
6478 return hwrm_send_message_silent(bp, &req, sizeof(req),
6479 HWRM_CMD_TIMEOUT);
8f23d638
MC
6480}
6481
6482static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6483 int ring_grps, int cp_rings, int stats,
6484 int vnics)
8f23d638
MC
6485{
6486 struct hwrm_func_cfg_input req = {0};
6fc2ffdf 6487 u32 flags;
98fdbe73 6488
6fc2ffdf 6489 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 6490 cp_rings, stats, vnics);
8f23d638 6491 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
41e8d798 6492 if (BNXT_NEW_RM(bp)) {
8f23d638
MC
6493 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6494 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638
MC
6495 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6496 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
41e8d798 6497 if (bp->flags & BNXT_FLAG_CHIP_P5)
0b815023
MC
6498 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6499 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
41e8d798
MC
6500 else
6501 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6502 }
6fc2ffdf 6503
8f23d638 6504 req.flags = cpu_to_le32(flags);
9f90445c
VV
6505 return hwrm_send_message_silent(bp, &req, sizeof(req),
6506 HWRM_CMD_TIMEOUT);
98fdbe73
MC
6507}
6508
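/* Ask firmware to test, without committing, whether the requested ring
 * counts can be satisfied, using the PF or VF variant as appropriate.
 */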
8f23d638 6509static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6510 int ring_grps, int cp_rings, int stats,
6511 int vnics)
8f23d638
MC
6512{
6513 if (bp->hwrm_spec_code < 0x10801)
6514 return 0;
6515
6516 if (BNXT_PF(bp))
6517 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
780baad4
VV
6518 ring_grps, cp_rings, stats,
6519 vnics);
8f23d638
MC
6520
6521 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
780baad4 6522 cp_rings, stats, vnics);
8f23d638
MC
6523}
6524
74706afa
MC
6525static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6526{
6527 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6528 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6529 struct hwrm_ring_aggint_qcaps_input req = {0};
6530 int rc;
6531
6532 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6533 coal_cap->num_cmpl_dma_aggr_max = 63;
6534 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6535 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6536 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6537 coal_cap->int_lat_tmr_min_max = 65535;
6538 coal_cap->int_lat_tmr_max_max = 65535;
6539 coal_cap->num_cmpl_aggr_int_max = 65535;
6540 coal_cap->timer_units = 80;
6541
6542 if (bp->hwrm_spec_code < 0x10902)
6543 return;
6544
6545 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6546 mutex_lock(&bp->hwrm_cmd_lock);
6547 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6548 if (!rc) {
6549 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
58590c8d 6550 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
74706afa
MC
6551 coal_cap->num_cmpl_dma_aggr_max =
6552 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6553 coal_cap->num_cmpl_dma_aggr_during_int_max =
6554 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6555 coal_cap->cmpl_aggr_dma_tmr_max =
6556 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6557 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6558 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6559 coal_cap->int_lat_tmr_min_max =
6560 le16_to_cpu(resp->int_lat_tmr_min_max);
6561 coal_cap->int_lat_tmr_max_max =
6562 le16_to_cpu(resp->int_lat_tmr_max_max);
6563 coal_cap->num_cmpl_aggr_int_max =
6564 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6565 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6566 }
6567 mutex_unlock(&bp->hwrm_cmd_lock);
6568}
6569
6570static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6571{
6572 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6573
6574 return usec * 1000 / coal_cap->timer_units;
6575}
6576
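/* Convert the driver's coalescing settings (ticks and buffer counts) into
 * HWRM aggint parameters, clamping each value to the queried capabilities.
 */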
6577static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6578 struct bnxt_coal *hw_coal,
bb053f52
MC
6579 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6580{
74706afa
MC
6581 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6582 u32 cmpl_params = coal_cap->cmpl_params;
6583 u16 val, tmr, max, flags = 0;
f8503969
MC
6584
6585 max = hw_coal->bufs_per_record * 128;
6586 if (hw_coal->budget)
6587 max = hw_coal->bufs_per_record * hw_coal->budget;
74706afa 6588 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
f8503969
MC
6589
6590 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6591 req->num_cmpl_aggr_int = cpu_to_le16(val);
b153cbc5 6592
74706afa 6593 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
f8503969
MC
6594 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6595
74706afa
MC
6596 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6597 coal_cap->num_cmpl_dma_aggr_during_int_max);
f8503969
MC
6598 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6599
74706afa
MC
6600 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6601 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
f8503969
MC
6602 req->int_lat_tmr_max = cpu_to_le16(tmr);
6603
6604 /* min timer set to 1/2 of interrupt timer */
74706afa
MC
6605 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6606 val = tmr / 2;
6607 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6608 req->int_lat_tmr_min = cpu_to_le16(val);
6609 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6610 }
f8503969
MC
6611
6612 /* buf timer set to 1/4 of interrupt timer */
74706afa 6613 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
f8503969
MC
6614 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6615
74706afa
MC
6616 if (cmpl_params &
6617 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6618 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6619 val = clamp_t(u16, tmr, 1,
6620 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6adc4601 6621 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
74706afa
MC
6622 req->enables |=
6623 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6624 }
f8503969 6625
74706afa
MC
6626 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6627 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6628 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6629 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
f8503969 6630 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
bb053f52 6631 req->flags = cpu_to_le16(flags);
74706afa 6632 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
bb053f52
MC
6633}
6634
58590c8d
MC
6635/* Caller holds bp->hwrm_cmd_lock */
6636static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6637 struct bnxt_coal *hw_coal)
6638{
6639 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6640 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6641 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6642 u32 nq_params = coal_cap->nq_params;
6643 u16 tmr;
6644
6645 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6646 return 0;
6647
6648 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6649 -1, -1);
6650 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6651 req.flags =
6652 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6653
6654 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6655 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6656 req.int_lat_tmr_min = cpu_to_le16(tmr);
6657 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6658 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6659}
6660
6a8788f2
AG
6661int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6662{
6663 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6664 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6665 struct bnxt_coal coal;
6a8788f2
AG
6666
 6667	/* Tick values in microseconds.
6668 * 1 coal_buf x bufs_per_record = 1 completion record.
6669 */
6670 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6671
6672 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6673 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6674
6675 if (!bnapi->rx_ring)
6676 return -ENODEV;
6677
6678 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6679 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6680
74706afa 6681 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6a8788f2 6682
2c61d211 6683 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6a8788f2
AG
6684
6685 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6686 HWRM_CMD_TIMEOUT);
6687}
6688
c0c050c5
MC
6689int bnxt_hwrm_set_coal(struct bnxt *bp)
6690{
6691 int i, rc = 0;
dfc9c94a
MC
6692 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6693 req_tx = {0}, *req;
c0c050c5 6694
dfc9c94a
MC
6695 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6696 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6697 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6698 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
c0c050c5 6699
74706afa
MC
6700 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6701 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
c0c050c5
MC
6702
6703 mutex_lock(&bp->hwrm_cmd_lock);
6704 for (i = 0; i < bp->cp_nr_rings; i++) {
dfc9c94a 6705 struct bnxt_napi *bnapi = bp->bnapi[i];
58590c8d 6706 struct bnxt_coal *hw_coal;
2c61d211 6707 u16 ring_id;
c0c050c5 6708
dfc9c94a 6709 req = &req_rx;
2c61d211
MC
6710 if (!bnapi->rx_ring) {
6711 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
dfc9c94a 6712 req = &req_tx;
2c61d211
MC
6713 } else {
6714 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6715 }
6716 req->ring_id = cpu_to_le16(ring_id);
dfc9c94a
MC
6717
6718 rc = _hwrm_send_message(bp, req, sizeof(*req),
c0c050c5
MC
6719 HWRM_CMD_TIMEOUT);
6720 if (rc)
6721 break;
58590c8d
MC
6722
6723 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6724 continue;
6725
6726 if (bnapi->rx_ring && bnapi->tx_ring) {
6727 req = &req_tx;
6728 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6729 req->ring_id = cpu_to_le16(ring_id);
6730 rc = _hwrm_send_message(bp, req, sizeof(*req),
6731 HWRM_CMD_TIMEOUT);
6732 if (rc)
6733 break;
6734 }
6735 if (bnapi->rx_ring)
6736 hw_coal = &bp->rx_coal;
6737 else
6738 hw_coal = &bp->tx_coal;
6739 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
c0c050c5
MC
6740 }
6741 mutex_unlock(&bp->hwrm_cmd_lock);
6742 return rc;
6743}
6744
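/* Free all statistics contexts. Firmware with major version <= 20 needs
 * the counters cleared explicitly before the context is freed.
 */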
3d061591 6745static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
c0c050c5 6746{
c2dec363 6747 struct hwrm_stat_ctx_clr_stats_input req0 = {0};
c0c050c5 6748 struct hwrm_stat_ctx_free_input req = {0};
3d061591 6749 int i;
c0c050c5
MC
6750
6751 if (!bp->bnapi)
3d061591 6752 return;
c0c050c5 6753
3e8060fa 6754 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3d061591 6755 return;
3e8060fa 6756
c2dec363 6757 bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
c0c050c5
MC
6758 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6759
6760 mutex_lock(&bp->hwrm_cmd_lock);
6761 for (i = 0; i < bp->cp_nr_rings; i++) {
6762 struct bnxt_napi *bnapi = bp->bnapi[i];
6763 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6764
6765 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6766 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
c2dec363
MC
6767 if (BNXT_FW_MAJ(bp) <= 20) {
6768 req0.stat_ctx_id = req.stat_ctx_id;
6769 _hwrm_send_message(bp, &req0, sizeof(req0),
6770 HWRM_CMD_TIMEOUT);
6771 }
3d061591
VV
6772 _hwrm_send_message(bp, &req, sizeof(req),
6773 HWRM_CMD_TIMEOUT);
c0c050c5
MC
6774
6775 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6776 }
6777 }
6778 mutex_unlock(&bp->hwrm_cmd_lock);
c0c050c5
MC
6779}
6780
6781static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6782{
6783 int rc = 0, i;
6784 struct hwrm_stat_ctx_alloc_input req = {0};
6785 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6786
3e8060fa
PS
6787 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6788 return 0;
6789
c0c050c5
MC
6790 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6791
4e748506 6792 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
51f30785 6793 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
c0c050c5
MC
6794
6795 mutex_lock(&bp->hwrm_cmd_lock);
6796 for (i = 0; i < bp->cp_nr_rings; i++) {
6797 struct bnxt_napi *bnapi = bp->bnapi[i];
6798 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6799
177a6cde 6800 req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
c0c050c5
MC
6801
6802 rc = _hwrm_send_message(bp, &req, sizeof(req),
6803 HWRM_CMD_TIMEOUT);
6804 if (rc)
6805 break;
6806
6807 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6808
6809 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6810 }
6811 mutex_unlock(&bp->hwrm_cmd_lock);
89aa8445 6812 return rc;
c0c050c5
MC
6813}
6814
cf6645f8
MC
6815static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6816{
6817 struct hwrm_func_qcfg_input req = {0};
567b2abe 6818 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8ae24738 6819 u32 min_db_offset = 0;
9315edca 6820 u16 flags;
cf6645f8
MC
6821 int rc;
6822
6823 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6824 req.fid = cpu_to_le16(0xffff);
6825 mutex_lock(&bp->hwrm_cmd_lock);
6826 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6827 if (rc)
6828 goto func_qcfg_exit;
6829
6830#ifdef CONFIG_BNXT_SRIOV
6831 if (BNXT_VF(bp)) {
cf6645f8
MC
6832 struct bnxt_vf_info *vf = &bp->vf;
6833
6834 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
230d1f0d
MC
6835 } else {
6836 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
cf6645f8
MC
6837 }
6838#endif
9315edca
MC
6839 flags = le16_to_cpu(resp->flags);
6840 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6841 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
97381a18 6842 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
9315edca 6843 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
97381a18 6844 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
9315edca
MC
6845 }
6846 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6847 bp->flags |= BNXT_FLAG_MULTI_HOST;
8d4bd96b
MC
6848 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6849 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
bc39f885 6850
567b2abe
SB
6851 switch (resp->port_partition_type) {
6852 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6853 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6854 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6855 bp->port_partition_type = resp->port_partition_type;
6856 break;
6857 }
32e8239c
MC
6858 if (bp->hwrm_spec_code < 0x10707 ||
6859 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6860 bp->br_mode = BRIDGE_MODE_VEB;
6861 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6862 bp->br_mode = BRIDGE_MODE_VEPA;
6863 else
6864 bp->br_mode = BRIDGE_MODE_UNDEF;
cf6645f8 6865
7eb9bb3a
MC
6866 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6867 if (!bp->max_mtu)
6868 bp->max_mtu = BNXT_MAX_MTU;
6869
8ae24738
MC
6870 if (bp->db_size)
6871 goto func_qcfg_exit;
6872
6873 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6874 if (BNXT_PF(bp))
6875 min_db_offset = DB_PF_OFFSET_P5;
6876 else
6877 min_db_offset = DB_VF_OFFSET_P5;
6878 }
6879 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6880 1024);
6881 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6882 bp->db_size <= min_db_offset)
6883 bp->db_size = pci_resource_len(bp->pdev, 2);
6884
cf6645f8
MC
6885func_qcfg_exit:
6886 mutex_unlock(&bp->hwrm_cmd_lock);
6887 return rc;
6888}
6889
e9696ff3
MC
6890static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6891 struct hwrm_func_backing_store_qcaps_output *resp)
6892{
6893 struct bnxt_mem_init *mem_init;
41435c39 6894 u16 init_mask;
e9696ff3 6895 u8 init_val;
41435c39 6896 u8 *offset;
e9696ff3
MC
6897 int i;
6898
6899 init_val = resp->ctx_kind_initializer;
41435c39
MC
6900 init_mask = le16_to_cpu(resp->ctx_init_mask);
6901 offset = &resp->qp_init_offset;
6902 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6903 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
e9696ff3 6904 mem_init->init_val = init_val;
41435c39
MC
6905 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6906 if (!init_mask)
6907 continue;
6908 if (i == BNXT_CTX_MEM_INIT_STAT)
6909 offset = &resp->stat_init_offset;
6910 if (init_mask & (1 << i))
6911 mem_init->offset = *offset * 4;
6912 else
6913 mem_init->init_val = 0;
6914 }
6915 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6916 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6917 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6918 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6919 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6920 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
e9696ff3
MC
6921}
6922
98f04cf0
MC
6923static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6924{
6925 struct hwrm_func_backing_store_qcaps_input req = {0};
6926 struct hwrm_func_backing_store_qcaps_output *resp =
6927 bp->hwrm_cmd_resp_addr;
6928 int rc;
6929
6930 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6931 return 0;
6932
6933 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6934 mutex_lock(&bp->hwrm_cmd_lock);
6935 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6936 if (!rc) {
6937 struct bnxt_ctx_pg_info *ctx_pg;
6938 struct bnxt_ctx_mem_info *ctx;
ac3158cb 6939 int i, tqm_rings;
98f04cf0
MC
6940
6941 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6942 if (!ctx) {
6943 rc = -ENOMEM;
6944 goto ctx_err;
6945 }
98f04cf0
MC
6946 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6947 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6948 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6949 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6950 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6951 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6952 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6953 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6954 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6955 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6956 ctx->vnic_max_vnic_entries =
6957 le16_to_cpu(resp->vnic_max_vnic_entries);
6958 ctx->vnic_max_ring_table_entries =
6959 le16_to_cpu(resp->vnic_max_ring_table_entries);
6960 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6961 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6962 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6963 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6964 ctx->tqm_min_entries_per_ring =
6965 le32_to_cpu(resp->tqm_min_entries_per_ring);
6966 ctx->tqm_max_entries_per_ring =
6967 le32_to_cpu(resp->tqm_max_entries_per_ring);
6968 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6969 if (!ctx->tqm_entries_multiple)
6970 ctx->tqm_entries_multiple = 1;
6971 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6972 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
53579e37
DS
6973 ctx->mrav_num_entries_units =
6974 le16_to_cpu(resp->mrav_num_entries_units);
98f04cf0
MC
6975 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6976 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
e9696ff3
MC
6977
6978 bnxt_init_ctx_initializer(ctx, resp);
6979
ac3158cb
MC
6980 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6981 if (!ctx->tqm_fp_rings_count)
6982 ctx->tqm_fp_rings_count = bp->max_q;
a029a2fe
MC
6983 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6984 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
ac3158cb 6985
a029a2fe 6986 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ac3158cb
MC
6987 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6988 if (!ctx_pg) {
6989 kfree(ctx);
6990 rc = -ENOMEM;
6991 goto ctx_err;
6992 }
6993 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6994 ctx->tqm_mem[i] = ctx_pg;
6995 bp->ctx = ctx;
98f04cf0
MC
6996 } else {
6997 rc = 0;
6998 }
6999ctx_err:
7000 mutex_unlock(&bp->hwrm_cmd_lock);
7001 return rc;
7002}
7003
1b9394e5
MC
7004static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
7005 __le64 *pg_dir)
7006{
be6d755f
EP
7007 if (!rmem->nr_pages)
7008 return;
7009
702279d2 7010 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
08fe9d18
MC
7011 if (rmem->depth >= 1) {
7012 if (rmem->depth == 2)
7013 *pg_attr |= 2;
7014 else
7015 *pg_attr |= 1;
1b9394e5
MC
7016 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
7017 } else {
7018 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
7019 }
7020}
7021
7022#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
7023 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
7024 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
7025 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
7026 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
7027 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7028
7029static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7030{
7031 struct hwrm_func_backing_store_cfg_input req = {0};
7032 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7033 struct bnxt_ctx_pg_info *ctx_pg;
16db6323 7034 u32 req_len = sizeof(req);
1b9394e5
MC
7035 __le32 *num_entries;
7036 __le64 *pg_dir;
53579e37 7037 u32 flags = 0;
1b9394e5 7038 u8 *pg_attr;
1b9394e5 7039 u32 ena;
9f90445c 7040 int i;
1b9394e5
MC
7041
7042 if (!ctx)
7043 return 0;
7044
16db6323
MC
7045 if (req_len > bp->hwrm_max_ext_req_len)
7046 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
1b9394e5
MC
7047 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
7048 req.enables = cpu_to_le32(enables);
7049
7050 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7051 ctx_pg = &ctx->qp_mem;
7052 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
7053 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7054 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7055 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7056 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7057 &req.qpc_pg_size_qpc_lvl,
7058 &req.qpc_page_dir);
7059 }
7060 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7061 ctx_pg = &ctx->srq_mem;
7062 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
7063 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7064 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7065 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7066 &req.srq_pg_size_srq_lvl,
7067 &req.srq_page_dir);
7068 }
7069 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7070 ctx_pg = &ctx->cq_mem;
7071 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
7072 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7073 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7074 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
7075 &req.cq_page_dir);
7076 }
7077 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7078 ctx_pg = &ctx->vnic_mem;
7079 req.vnic_num_vnic_entries =
7080 cpu_to_le16(ctx->vnic_max_vnic_entries);
7081 req.vnic_num_ring_table_entries =
7082 cpu_to_le16(ctx->vnic_max_ring_table_entries);
7083 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7084 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7085 &req.vnic_pg_size_vnic_lvl,
7086 &req.vnic_page_dir);
7087 }
7088 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7089 ctx_pg = &ctx->stat_mem;
7090 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7091 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7092 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7093 &req.stat_pg_size_stat_lvl,
7094 &req.stat_page_dir);
7095 }
cf6daed0
MC
7096 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7097 ctx_pg = &ctx->mrav_mem;
7098 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
53579e37
DS
7099 if (ctx->mrav_num_entries_units)
7100 flags |=
7101 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
cf6daed0
MC
7102 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7103 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7104 &req.mrav_pg_size_mrav_lvl,
7105 &req.mrav_page_dir);
7106 }
7107 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7108 ctx_pg = &ctx->tim_mem;
7109 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
7110 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7111 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7112 &req.tim_pg_size_tim_lvl,
7113 &req.tim_page_dir);
7114 }
1b9394e5
MC
7115 for (i = 0, num_entries = &req.tqm_sp_num_entries,
7116 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
7117 pg_dir = &req.tqm_sp_page_dir,
7118 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
a029a2fe
MC
7119 i < BNXT_MAX_TQM_RINGS;
7120 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
1b9394e5
MC
7121 if (!(enables & ena))
7122 continue;
7123
7124 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7125 ctx_pg = ctx->tqm_mem[i];
7126 *num_entries = cpu_to_le32(ctx_pg->entries);
7127 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7128 }
53579e37 7129 req.flags = cpu_to_le32(flags);
16db6323 7130 return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
1b9394e5
MC
7131}
7132
98f04cf0 7133static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
08fe9d18 7134 struct bnxt_ctx_pg_info *ctx_pg)
98f04cf0
MC
7135{
7136 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7137
98f04cf0
MC
7138 rmem->page_size = BNXT_PAGE_SIZE;
7139 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7140 rmem->dma_arr = ctx_pg->ctx_dma_arr;
1b9394e5 7141 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
08fe9d18
MC
7142 if (rmem->depth >= 1)
7143 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
98f04cf0
MC
7144 return bnxt_alloc_ring(bp, rmem);
7145}
7146
08fe9d18
MC
7147static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7148 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
e9696ff3 7149 u8 depth, struct bnxt_mem_init *mem_init)
08fe9d18
MC
7150{
7151 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7152 int rc;
7153
7154 if (!mem_size)
bbf211b1 7155 return -EINVAL;
08fe9d18
MC
7156
7157 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7158 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7159 ctx_pg->nr_pages = 0;
7160 return -EINVAL;
7161 }
7162 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7163 int nr_tbls, i;
7164
7165 rmem->depth = 2;
7166 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7167 GFP_KERNEL);
7168 if (!ctx_pg->ctx_pg_tbl)
7169 return -ENOMEM;
7170 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7171 rmem->nr_pages = nr_tbls;
7172 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7173 if (rc)
7174 return rc;
7175 for (i = 0; i < nr_tbls; i++) {
7176 struct bnxt_ctx_pg_info *pg_tbl;
7177
7178 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7179 if (!pg_tbl)
7180 return -ENOMEM;
7181 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7182 rmem = &pg_tbl->ring_mem;
7183 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7184 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7185 rmem->depth = 1;
7186 rmem->nr_pages = MAX_CTX_PAGES;
e9696ff3 7187 rmem->mem_init = mem_init;
6ef982de
MC
7188 if (i == (nr_tbls - 1)) {
7189 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7190
7191 if (rem)
7192 rmem->nr_pages = rem;
7193 }
08fe9d18
MC
7194 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7195 if (rc)
7196 break;
7197 }
7198 } else {
7199 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7200 if (rmem->nr_pages > 1 || depth)
7201 rmem->depth = 1;
e9696ff3 7202 rmem->mem_init = mem_init;
08fe9d18
MC
7203 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7204 }
7205 return rc;
7206}
7207
7208static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7209 struct bnxt_ctx_pg_info *ctx_pg)
7210{
7211 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7212
7213 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7214 ctx_pg->ctx_pg_tbl) {
7215 int i, nr_tbls = rmem->nr_pages;
7216
7217 for (i = 0; i < nr_tbls; i++) {
7218 struct bnxt_ctx_pg_info *pg_tbl;
7219 struct bnxt_ring_mem_info *rmem2;
7220
7221 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7222 if (!pg_tbl)
7223 continue;
7224 rmem2 = &pg_tbl->ring_mem;
7225 bnxt_free_ring(bp, rmem2);
7226 ctx_pg->ctx_pg_arr[i] = NULL;
7227 kfree(pg_tbl);
7228 ctx_pg->ctx_pg_tbl[i] = NULL;
7229 }
7230 kfree(ctx_pg->ctx_pg_tbl);
7231 ctx_pg->ctx_pg_tbl = NULL;
7232 }
7233 bnxt_free_ring(bp, rmem);
7234 ctx_pg->nr_pages = 0;
7235}
7236
98f04cf0
MC
7237static void bnxt_free_ctx_mem(struct bnxt *bp)
7238{
7239 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7240 int i;
7241
7242 if (!ctx)
7243 return;
7244
7245 if (ctx->tqm_mem[0]) {
ac3158cb 7246 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
08fe9d18 7247 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
98f04cf0
MC
7248 kfree(ctx->tqm_mem[0]);
7249 ctx->tqm_mem[0] = NULL;
7250 }
7251
cf6daed0
MC
7252 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7253 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
08fe9d18
MC
7254 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7255 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7256 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7257 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7258 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
98f04cf0
MC
7259 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7260}
7261
7262static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7263{
7264 struct bnxt_ctx_pg_info *ctx_pg;
7265 struct bnxt_ctx_mem_info *ctx;
e9696ff3 7266 struct bnxt_mem_init *init;
1b9394e5 7267 u32 mem_size, ena, entries;
c7dd7ab4 7268 u32 entries_sp, min;
53579e37 7269 u32 num_mr, num_ah;
cf6daed0
MC
7270 u32 extra_srqs = 0;
7271 u32 extra_qps = 0;
7272 u8 pg_lvl = 1;
98f04cf0
MC
7273 int i, rc;
7274
7275 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7276 if (rc) {
7277 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7278 rc);
7279 return rc;
7280 }
7281 ctx = bp->ctx;
7282 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7283 return 0;
7284
d629522e 7285 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
cf6daed0
MC
7286 pg_lvl = 2;
7287 extra_qps = 65536;
7288 extra_srqs = 8192;
7289 }
7290
98f04cf0 7291 ctx_pg = &ctx->qp_mem;
cf6daed0
MC
7292 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7293 extra_qps;
be6d755f
EP
7294 if (ctx->qp_entry_size) {
7295 mem_size = ctx->qp_entry_size * ctx_pg->entries;
e9696ff3
MC
7296 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7297 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7298 if (rc)
7299 return rc;
7300 }
98f04cf0
MC
7301
7302 ctx_pg = &ctx->srq_mem;
cf6daed0 7303 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
be6d755f
EP
7304 if (ctx->srq_entry_size) {
7305 mem_size = ctx->srq_entry_size * ctx_pg->entries;
e9696ff3
MC
7306 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7307 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7308 if (rc)
7309 return rc;
7310 }
98f04cf0
MC
7311
7312 ctx_pg = &ctx->cq_mem;
cf6daed0 7313 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
be6d755f
EP
7314 if (ctx->cq_entry_size) {
7315 mem_size = ctx->cq_entry_size * ctx_pg->entries;
e9696ff3
MC
7316 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7317 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7318 if (rc)
7319 return rc;
7320 }
98f04cf0
MC
7321
7322 ctx_pg = &ctx->vnic_mem;
7323 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7324 ctx->vnic_max_ring_table_entries;
be6d755f
EP
7325 if (ctx->vnic_entry_size) {
7326 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
e9696ff3
MC
7327 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7328 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
be6d755f
EP
7329 if (rc)
7330 return rc;
7331 }
98f04cf0
MC
7332
7333 ctx_pg = &ctx->stat_mem;
7334 ctx_pg->entries = ctx->stat_max_entries;
be6d755f
EP
7335 if (ctx->stat_entry_size) {
7336 mem_size = ctx->stat_entry_size * ctx_pg->entries;
e9696ff3
MC
7337 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7338 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
be6d755f
EP
7339 if (rc)
7340 return rc;
7341 }
98f04cf0 7342
cf6daed0
MC
7343 ena = 0;
7344 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7345 goto skip_rdma;
7346
7347 ctx_pg = &ctx->mrav_mem;
53579e37
DS
7348 /* 128K extra is needed to accommodate static AH context
7349 * allocation by f/w.
7350 */
7351 num_mr = 1024 * 256;
7352 num_ah = 1024 * 128;
7353 ctx_pg->entries = num_mr + num_ah;
be6d755f
EP
7354 if (ctx->mrav_entry_size) {
7355 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
e9696ff3
MC
7356 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7357 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
be6d755f
EP
7358 if (rc)
7359 return rc;
7360 }
cf6daed0 7361 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
53579e37
DS
7362 if (ctx->mrav_num_entries_units)
7363 ctx_pg->entries =
7364 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7365 (num_ah / ctx->mrav_num_entries_units);
cf6daed0
MC
7366
7367 ctx_pg = &ctx->tim_mem;
7368 ctx_pg->entries = ctx->qp_mem.entries;
be6d755f
EP
7369 if (ctx->tim_entry_size) {
7370 mem_size = ctx->tim_entry_size * ctx_pg->entries;
e9696ff3 7371 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
be6d755f
EP
7372 if (rc)
7373 return rc;
7374 }
cf6daed0
MC
7375 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7376
7377skip_rdma:
c7dd7ab4
MC
7378 min = ctx->tqm_min_entries_per_ring;
7379 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7380 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7381 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
c12e1643 7382 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
98f04cf0 7383 entries = roundup(entries, ctx->tqm_entries_multiple);
c7dd7ab4 7384 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
ac3158cb 7385 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
98f04cf0 7386 ctx_pg = ctx->tqm_mem[i];
c7dd7ab4 7387 ctx_pg->entries = i ? entries : entries_sp;
be6d755f
EP
7388 if (ctx->tqm_entry_size) {
7389 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7390 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
e9696ff3 7391 NULL);
be6d755f
EP
7392 if (rc)
7393 return rc;
7394 }
1b9394e5 7395 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
98f04cf0 7396 }
1b9394e5
MC
7397 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7398 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
0b5b561c 7399 if (rc) {
1b9394e5
MC
7400 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7401 rc);
0b5b561c
MC
7402 return rc;
7403 }
7404 ctx->flags |= BNXT_CTX_FLAG_INITED;
98f04cf0
MC
7405 return 0;
7406}
7407
db4723b3 7408int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
be0dd9c4
MC
7409{
7410 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7411 struct hwrm_func_resource_qcaps_input req = {0};
7412 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7413 int rc;
7414
7415 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
7416 req.fid = cpu_to_le16(0xffff);
7417
7418 mutex_lock(&bp->hwrm_cmd_lock);
351cbde9
JT
7419 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
7420 HWRM_CMD_TIMEOUT);
d4f1420d 7421 if (rc)
be0dd9c4 7422 goto hwrm_func_resc_qcaps_exit;
be0dd9c4 7423
db4723b3
MC
7424 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7425 if (!all)
7426 goto hwrm_func_resc_qcaps_exit;
7427
be0dd9c4
MC
7428 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7429 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7430 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7431 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7432 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7433 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7434 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7435 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7436 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7437 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7438 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7439 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7440 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7441 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7442 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7443 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7444
9c1fabdf
MC
7445 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7446 u16 max_msix = le16_to_cpu(resp->max_msix);
7447
f7588cd8 7448 hw_resc->max_nqs = max_msix;
9c1fabdf
MC
7449 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7450 }
7451
4673d664
MC
7452 if (BNXT_PF(bp)) {
7453 struct bnxt_pf_info *pf = &bp->pf;
7454
7455 pf->vf_resv_strategy =
7456 le16_to_cpu(resp->vf_reservation_strategy);
bf82736d 7457 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
4673d664
MC
7458 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7459 }
be0dd9c4
MC
7460hwrm_func_resc_qcaps_exit:
7461 mutex_unlock(&bp->hwrm_cmd_lock);
7462 return rc;
7463}
7464
ae5c42f0
MC
7465/* bp->hwrm_cmd_lock already held. */
7466static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7467{
7468 struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7469 struct hwrm_port_mac_ptp_qcfg_input req = {0};
7470 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7471 u8 flags;
7472 int rc;
7473
7474 if (bp->hwrm_spec_code < 0x10801) {
7475 rc = -ENODEV;
7476 goto no_ptp;
7477 }
7478
7479 req.port_id = cpu_to_le16(bp->pf.port_id);
7480 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1);
7481 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7482 if (rc)
7483 goto no_ptp;
7484
7485 flags = resp->flags;
7486 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7487 rc = -ENODEV;
7488 goto no_ptp;
7489 }
7490 if (!ptp) {
7491 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7492 if (!ptp)
7493 return -ENOMEM;
7494 ptp->bp = bp;
7495 bp->ptp_cfg = ptp;
7496 }
7497 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7498 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7499 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7500 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7501 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7502 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7503 } else {
7504 rc = -ENODEV;
7505 goto no_ptp;
7506 }
7507 return 0;
7508
7509no_ptp:
7510 kfree(ptp);
7511 bp->ptp_cfg = NULL;
7512 return rc;
7513}
7514
be0dd9c4 7515static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5
MC
7516{
7517 int rc = 0;
7518 struct hwrm_func_qcaps_input req = {0};
7519 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6a4f2947 7520 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
1da63ddd 7521 u32 flags, flags_ext;
c0c050c5
MC
7522
7523 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
7524 req.fid = cpu_to_le16(0xffff);
7525
7526 mutex_lock(&bp->hwrm_cmd_lock);
7527 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7528 if (rc)
7529 goto hwrm_func_qcaps_exit;
7530
6a4f2947
MC
7531 flags = le32_to_cpu(resp->flags);
7532 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
e4060d30 7533 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6a4f2947 7534 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
e4060d30 7535 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
55e4398d
VV
7536 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7537 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
0a3f4e4f
VV
7538 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7539 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
6154532f
VV
7540 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7541 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
07f83d72
MC
7542 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7543 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
4037eb71
VV
7544 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7545 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
1da63ddd
EP
7546 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7547 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7548
7549 flags_ext = le32_to_cpu(resp->flags_ext);
7550 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7551 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
e4060d30 7552
7cc5a20e 7553 bp->tx_push_thresh = 0;
fed7edd1
MC
7554 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7555 BNXT_FW_MAJ(bp) > 217)
7cc5a20e
MC
7556 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7557
6a4f2947
MC
7558 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7559 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7560 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7561 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7562 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7563 if (!hw_resc->max_hw_ring_grps)
7564 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7565 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7566 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7567 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7568
c0c050c5
MC
7569 if (BNXT_PF(bp)) {
7570 struct bnxt_pf_info *pf = &bp->pf;
7571
7572 pf->fw_fid = le16_to_cpu(resp->fid);
7573 pf->port_id = le16_to_cpu(resp->port_id);
11f15ed3 7574 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
c0c050c5
MC
7575 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7576 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7577 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7578 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7579 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7580 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7581 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7582 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
ba642ab7 7583 bp->flags &= ~BNXT_FLAG_WOL_CAP;
6a4f2947 7584 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
c1ef146a 7585 bp->flags |= BNXT_FLAG_WOL_CAP;
de5bf194 7586 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
ae5c42f0 7587 __bnxt_hwrm_ptp_qcfg(bp);
de5bf194
MC
7588 } else {
7589 kfree(bp->ptp_cfg);
7590 bp->ptp_cfg = NULL;
7591 }
c0c050c5 7592 } else {
379a80a1 7593#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
7594 struct bnxt_vf_info *vf = &bp->vf;
7595
7596 vf->fw_fid = le16_to_cpu(resp->fid);
7cc5a20e 7597 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
379a80a1 7598#endif
c0c050c5
MC
7599 }
7600
c0c050c5
MC
7601hwrm_func_qcaps_exit:
7602 mutex_unlock(&bp->hwrm_cmd_lock);
7603 return rc;
7604}
7605
804fba4e
MC
7606static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7607
be0dd9c4
MC
7608static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7609{
7610 int rc;
7611
7612 rc = __bnxt_hwrm_func_qcaps(bp);
7613 if (rc)
7614 return rc;
804fba4e
MC
7615 rc = bnxt_hwrm_queue_qportcfg(bp);
7616 if (rc) {
7617 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7618 return rc;
7619 }
be0dd9c4 7620 if (bp->hwrm_spec_code >= 0x10803) {
98f04cf0
MC
7621 rc = bnxt_alloc_ctx_mem(bp);
7622 if (rc)
7623 return rc;
db4723b3 7624 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
be0dd9c4 7625 if (!rc)
97381a18 7626 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
be0dd9c4
MC
7627 }
7628 return 0;
7629}
7630
e969ae5b
MC
7631static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7632{
7633 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7634 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7635 int rc = 0;
7636 u32 flags;
7637
7638 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7639 return 0;
7640
7641 resp = bp->hwrm_cmd_resp_addr;
7642 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7643
7644 mutex_lock(&bp->hwrm_cmd_lock);
7645 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7646 if (rc)
7647 goto hwrm_cfa_adv_qcaps_exit;
7648
7649 flags = le32_to_cpu(resp->flags);
7650 if (flags &
41136ab3
MC
7651 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7652 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
e969ae5b
MC
7653
7654hwrm_cfa_adv_qcaps_exit:
7655 mutex_unlock(&bp->hwrm_cmd_lock);
7656 return rc;
7657}
7658
3e9ec2bb
EP
7659static int __bnxt_alloc_fw_health(struct bnxt *bp)
7660{
7661 if (bp->fw_health)
7662 return 0;
7663
7664 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7665 if (!bp->fw_health)
7666 return -ENOMEM;
7667
7668 return 0;
7669}
7670
7671static int bnxt_alloc_fw_health(struct bnxt *bp)
7672{
7673 int rc;
7674
7675 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7676 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7677 return 0;
7678
7679 rc = __bnxt_alloc_fw_health(bp);
7680 if (rc) {
7681 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7682 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7683 return rc;
7684 }
7685
7686 return 0;
7687}
7688
ba02629f
EP
7689static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7690{
7691 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7692 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7693 BNXT_FW_HEALTH_WIN_MAP_OFF);
7694}
7695
80a9641f
PC
7696bool bnxt_is_fw_healthy(struct bnxt *bp)
7697{
7698 if (bp->fw_health && bp->fw_health->status_reliable) {
7699 u32 fw_status;
7700
7701 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7702 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7703 return false;
7704 }
7705
7706 return true;
7707}
7708
43a440c4
MC
7709static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7710{
7711 struct bnxt_fw_health *fw_health = bp->fw_health;
7712 u32 reg_type;
7713
7714 if (!fw_health || !fw_health->status_reliable)
7715 return;
7716
7717 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7718 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7719 fw_health->status_reliable = false;
7720}
7721
ba02629f
EP
7722static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7723{
7724 void __iomem *hs;
7725 u32 status_loc;
7726 u32 reg_type;
7727 u32 sig;
7728
43a440c4
MC
7729 if (bp->fw_health)
7730 bp->fw_health->status_reliable = false;
7731
ba02629f
EP
7732 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7733 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7734
7735 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7736 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
d1cbd165
MC
7737 if (!bp->chip_num) {
7738 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7739 bp->chip_num = readl(bp->bar0 +
7740 BNXT_FW_HEALTH_WIN_BASE +
7741 BNXT_GRC_REG_CHIP_NUM);
7742 }
43a440c4 7743 if (!BNXT_CHIP_P5(bp))
d1cbd165 7744 return;
43a440c4 7745
d1cbd165
MC
7746 status_loc = BNXT_GRC_REG_STATUS_P5 |
7747 BNXT_FW_HEALTH_REG_TYPE_BAR0;
7748 } else {
7749 status_loc = readl(hs + offsetof(struct hcomm_status,
7750 fw_status_loc));
ba02629f
EP
7751 }
7752
7753 if (__bnxt_alloc_fw_health(bp)) {
7754 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7755 return;
7756 }
7757
ba02629f
EP
7758 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7759 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7760 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7761 __bnxt_map_fw_health_reg(bp, status_loc);
7762 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7763 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7764 }
7765
7766 bp->fw_health->status_reliable = true;
7767}
7768
9ffbd677
MC
7769static int bnxt_map_fw_health_regs(struct bnxt *bp)
7770{
7771 struct bnxt_fw_health *fw_health = bp->fw_health;
7772 u32 reg_base = 0xffffffff;
7773 int i;
7774
43a440c4 7775 bp->fw_health->status_reliable = false;
9ffbd677
MC
7776 /* Only pre-map the monitoring GRC registers using window 3 */
7777 for (i = 0; i < 4; i++) {
7778 u32 reg = fw_health->regs[i];
7779
7780 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7781 continue;
7782 if (reg_base == 0xffffffff)
7783 reg_base = reg & BNXT_GRC_BASE_MASK;
7784 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7785 return -ERANGE;
ba02629f 7786 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9ffbd677 7787 }
43a440c4 7788 bp->fw_health->status_reliable = true;
9ffbd677
MC
7789 if (reg_base == 0xffffffff)
7790 return 0;
7791
ba02629f 7792 __bnxt_map_fw_health_reg(bp, reg_base);
9ffbd677
MC
7793 return 0;
7794}
7795
07f83d72
MC
7796static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7797{
7798 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7799 struct bnxt_fw_health *fw_health = bp->fw_health;
7800 struct hwrm_error_recovery_qcfg_input req = {0};
7801 int rc, i;
7802
7803 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7804 return 0;
7805
7806 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7807 mutex_lock(&bp->hwrm_cmd_lock);
7808 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7809 if (rc)
7810 goto err_recovery_out;
07f83d72
MC
7811 fw_health->flags = le32_to_cpu(resp->flags);
7812 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7813 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7814 rc = -EINVAL;
7815 goto err_recovery_out;
7816 }
7817 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7818 fw_health->master_func_wait_dsecs =
7819 le32_to_cpu(resp->master_func_wait_period);
7820 fw_health->normal_func_wait_dsecs =
7821 le32_to_cpu(resp->normal_func_wait_period);
7822 fw_health->post_reset_wait_dsecs =
7823 le32_to_cpu(resp->master_func_wait_period_after_reset);
7824 fw_health->post_reset_max_wait_dsecs =
7825 le32_to_cpu(resp->max_bailout_time_after_reset);
7826 fw_health->regs[BNXT_FW_HEALTH_REG] =
7827 le32_to_cpu(resp->fw_health_status_reg);
7828 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7829 le32_to_cpu(resp->fw_heartbeat_reg);
7830 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7831 le32_to_cpu(resp->fw_reset_cnt_reg);
7832 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7833 le32_to_cpu(resp->reset_inprogress_reg);
7834 fw_health->fw_reset_inprog_reg_mask =
7835 le32_to_cpu(resp->reset_inprogress_reg_mask);
7836 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7837 if (fw_health->fw_reset_seq_cnt >= 16) {
7838 rc = -EINVAL;
7839 goto err_recovery_out;
7840 }
7841 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7842 fw_health->fw_reset_seq_regs[i] =
7843 le32_to_cpu(resp->reset_reg[i]);
7844 fw_health->fw_reset_seq_vals[i] =
7845 le32_to_cpu(resp->reset_reg_val[i]);
7846 fw_health->fw_reset_seq_delay_msec[i] =
7847 resp->delay_after_reset[i];
7848 }
7849err_recovery_out:
7850 mutex_unlock(&bp->hwrm_cmd_lock);
9ffbd677
MC
7851 if (!rc)
7852 rc = bnxt_map_fw_health_regs(bp);
07f83d72
MC
7853 if (rc)
7854 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7855 return rc;
7856}
7857
c0c050c5
MC
7858static int bnxt_hwrm_func_reset(struct bnxt *bp)
7859{
7860 struct hwrm_func_reset_input req = {0};
7861
7862 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7863 req.enables = 0;
7864
7865 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7866}
7867
4933f675
VV
7868static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7869{
7870 struct hwrm_nvm_get_dev_info_output nvm_info;
7871
7872 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7873 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7874 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7875 nvm_info.nvm_cfg_ver_upd);
7876}
7877
c0c050c5
MC
7878static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7879{
7880 int rc = 0;
7881 struct hwrm_queue_qportcfg_input req = {0};
7882 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
aabfc016
MC
7883 u8 i, j, *qptr;
7884 bool no_rdma;
c0c050c5
MC
7885
7886 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7887
7888 mutex_lock(&bp->hwrm_cmd_lock);
7889 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7890 if (rc)
7891 goto qportcfg_exit;
7892
7893 if (!resp->max_configurable_queues) {
7894 rc = -EINVAL;
7895 goto qportcfg_exit;
7896 }
7897 bp->max_tc = resp->max_configurable_queues;
87c374de 7898 bp->max_lltc = resp->max_configurable_lossless_queues;
c0c050c5
MC
7899 if (bp->max_tc > BNXT_MAX_QUEUE)
7900 bp->max_tc = BNXT_MAX_QUEUE;
7901
aabfc016
MC
7902 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7903 qptr = &resp->queue_id0;
7904 for (i = 0, j = 0; i < bp->max_tc; i++) {
98f04cf0
MC
7905 bp->q_info[j].queue_id = *qptr;
7906 bp->q_ids[i] = *qptr++;
aabfc016
MC
7907 bp->q_info[j].queue_profile = *qptr++;
7908 bp->tc_to_qidx[j] = j;
7909 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7910 (no_rdma && BNXT_PF(bp)))
7911 j++;
7912 }
98f04cf0 7913 bp->max_q = bp->max_tc;
aabfc016
MC
7914 bp->max_tc = max_t(u8, j, 1);
7915
441cabbb
MC
7916 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7917 bp->max_tc = 1;
7918
87c374de
MC
7919 if (bp->max_lltc > bp->max_tc)
7920 bp->max_lltc = bp->max_tc;
7921
c0c050c5
MC
7922qportcfg_exit:
7923 mutex_unlock(&bp->hwrm_cmd_lock);
7924 return rc;
7925}
7926
ba642ab7 7927static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
c0c050c5 7928{
c0c050c5 7929 struct hwrm_ver_get_input req = {0};
ba642ab7 7930 int rc;
c0c050c5
MC
7931
7932 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7933 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7934 req.hwrm_intf_min = HWRM_VERSION_MINOR;
7935 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
ba642ab7
MC
7936
7937 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7938 silent);
7939 return rc;
7940}
7941
7942static int bnxt_hwrm_ver_get(struct bnxt *bp)
7943{
7944 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
d0ad2ea2 7945 u16 fw_maj, fw_min, fw_bld, fw_rsv;
b7a444f0 7946 u32 dev_caps_cfg, hwrm_ver;
d0ad2ea2 7947 int rc, len;
ba642ab7
MC
7948
7949 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
c0c050c5 7950 mutex_lock(&bp->hwrm_cmd_lock);
ba642ab7 7951 rc = __bnxt_hwrm_ver_get(bp, false);
c0c050c5
MC
7952 if (rc)
7953 goto hwrm_ver_get_exit;
7954
7955 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7956
894aa69a
MC
7957 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7958 resp->hwrm_intf_min_8b << 8 |
7959 resp->hwrm_intf_upd_8b;
7960 if (resp->hwrm_intf_maj_8b < 1) {
c193554e 7961 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
894aa69a
MC
7962 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7963 resp->hwrm_intf_upd_8b);
c193554e 7964 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5 7965 }
b7a444f0
VV
7966
7967 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7968 HWRM_VERSION_UPDATE;
7969
7970 if (bp->hwrm_spec_code > hwrm_ver)
7971 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7972 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7973 HWRM_VERSION_UPDATE);
7974 else
7975 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7976 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7977 resp->hwrm_intf_upd_8b);
7978
d0ad2ea2
MC
7979 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7980 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7981 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7982 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7983 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7984 len = FW_VER_STR_LEN;
7985 } else {
7986 fw_maj = resp->hwrm_fw_maj_8b;
7987 fw_min = resp->hwrm_fw_min_8b;
7988 fw_bld = resp->hwrm_fw_bld_8b;
7989 fw_rsv = resp->hwrm_fw_rsvd_8b;
7990 len = BC_HWRM_STR_LEN;
7991 }
7992 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7993 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7994 fw_rsv);
c0c050c5 7995
691aa620
VV
7996 if (strlen(resp->active_pkg_name)) {
7997 int fw_ver_len = strlen(bp->fw_ver_str);
7998
7999 snprintf(bp->fw_ver_str + fw_ver_len,
8000 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8001 resp->active_pkg_name);
8002 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8003 }
8004
ff4fe81d
MC
8005 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8006 if (!bp->hwrm_cmd_timeout)
8007 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8008
1dfddc41 8009 if (resp->hwrm_intf_maj_8b >= 1) {
e6ef2699 8010 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
1dfddc41
MC
8011 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8012 }
8013 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8014 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
e6ef2699 8015
659c805c 8016 bp->chip_num = le16_to_cpu(resp->chip_num);
5313845f 8017 bp->chip_rev = resp->chip_rev;
3e8060fa
PS
8018 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8019 !resp->chip_metal)
8020 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
659c805c 8021
e605db80
DK
8022 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8023 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8024 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
97381a18 8025 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
e605db80 8026
760b6d33
VD
8027 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8028 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8029
abd43a13
VD
8030 if (dev_caps_cfg &
8031 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8032 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8033
2a516444
MC
8034 if (dev_caps_cfg &
8035 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8036 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8037
e969ae5b
MC
8038 if (dev_caps_cfg &
8039 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8040 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8041
c0c050c5
MC
8042hwrm_ver_get_exit:
8043 mutex_unlock(&bp->hwrm_cmd_lock);
8044 return rc;
8045}
8046
5ac67d8b
RS
8047int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8048{
8049 struct hwrm_fw_set_time_input req = {0};
7dfaa7bc
AB
8050 struct tm tm;
8051 time64_t now = ktime_get_real_seconds();
5ac67d8b 8052
ca2c39e2
MC
8053 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8054 bp->hwrm_spec_code < 0x10400)
5ac67d8b
RS
8055 return -EOPNOTSUPP;
8056
7dfaa7bc 8057 time64_to_tm(now, 0, &tm);
5ac67d8b
RS
8058 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
8059 req.year = cpu_to_le16(1900 + tm.tm_year);
8060 req.month = 1 + tm.tm_mon;
8061 req.day = tm.tm_mday;
8062 req.hour = tm.tm_hour;
8063 req.minute = tm.tm_min;
8064 req.second = tm.tm_sec;
8065 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8066}
8067
fea6b333
MC
8068static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8069{
8070 u64 sw_tmp;
8071
fa97f303 8072 hw &= mask;
fea6b333
MC
8073 sw_tmp = (*sw & ~mask) | hw;
8074 if (hw < (*sw & mask))
8075 sw_tmp += mask + 1;
8076 WRITE_ONCE(*sw, sw_tmp);
8077}
8078
8079static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8080 int count, bool ignore_zero)
8081{
8082 int i;
8083
8084 for (i = 0; i < count; i++) {
8085 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8086
8087 if (ignore_zero && !hw)
8088 continue;
8089
8090 if (masks[i] == -1ULL)
8091 sw_stats[i] = hw;
8092 else
8093 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8094 }
8095}
8096
8097static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8098{
8099 if (!stats->hw_stats)
8100 return;
8101
8102 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8103 stats->hw_masks, stats->len / 8, false);
8104}
8105
8106static void bnxt_accumulate_all_stats(struct bnxt *bp)
8107{
8108 struct bnxt_stats_mem *ring0_stats;
8109 bool ignore_zero = false;
8110 int i;
8111
8112 /* Chip bug. Counter intermittently becomes 0. */
8113 if (bp->flags & BNXT_FLAG_CHIP_P5)
8114 ignore_zero = true;
8115
8116 for (i = 0; i < bp->cp_nr_rings; i++) {
8117 struct bnxt_napi *bnapi = bp->bnapi[i];
8118 struct bnxt_cp_ring_info *cpr;
8119 struct bnxt_stats_mem *stats;
8120
8121 cpr = &bnapi->cp_ring;
8122 stats = &cpr->stats;
8123 if (!i)
8124 ring0_stats = stats;
8125 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8126 ring0_stats->hw_masks,
8127 ring0_stats->len / 8, ignore_zero);
8128 }
8129 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8130 struct bnxt_stats_mem *stats = &bp->port_stats;
8131 __le64 *hw_stats = stats->hw_stats;
8132 u64 *sw_stats = stats->sw_stats;
8133 u64 *masks = stats->hw_masks;
8134 int cnt;
8135
8136 cnt = sizeof(struct rx_port_stats) / 8;
8137 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8138
8139 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8140 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8141 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8142 cnt = sizeof(struct tx_port_stats) / 8;
8143 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8144 }
8145 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8146 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8147 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8148 }
8149}
8150
531d1d26 8151static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
3bdf56c4 8152{
3bdf56c4
MC
8153 struct bnxt_pf_info *pf = &bp->pf;
8154 struct hwrm_port_qstats_input req = {0};
8155
8156 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8157 return 0;
8158
531d1d26
MC
8159 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8160 return -EOPNOTSUPP;
8161
8162 req.flags = flags;
3bdf56c4
MC
8163 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
8164 req.port_id = cpu_to_le16(pf->port_id);
177a6cde
MC
8165 req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8166 BNXT_TX_PORT_STATS_BYTE_OFFSET);
8167 req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
9f90445c 8168 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3bdf56c4
MC
8169}
8170
531d1d26 8171static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
00db3cba 8172{
36e53349 8173 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
e37fed79 8174 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
00db3cba
VV
8175 struct hwrm_port_qstats_ext_input req = {0};
8176 struct bnxt_pf_info *pf = &bp->pf;
ad361adf 8177 u32 tx_stat_size;
36e53349 8178 int rc;
00db3cba
VV
8179
8180 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8181 return 0;
8182
531d1d26
MC
8183 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8184 return -EOPNOTSUPP;
8185
00db3cba 8186 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
531d1d26 8187 req.flags = flags;
00db3cba
VV
8188 req.port_id = cpu_to_le16(pf->port_id);
8189 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
177a6cde
MC
8190 req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8191 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8192 sizeof(struct tx_port_stats_ext) : 0;
ad361adf 8193 req.tx_stat_size = cpu_to_le16(tx_stat_size);
177a6cde 8194 req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
36e53349
MC
8195 mutex_lock(&bp->hwrm_cmd_lock);
8196 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8197 if (!rc) {
8198 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
ad361adf
MC
8199 bp->fw_tx_stats_ext_size = tx_stat_size ?
8200 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
36e53349
MC
8201 } else {
8202 bp->fw_rx_stats_ext_size = 0;
8203 bp->fw_tx_stats_ext_size = 0;
8204 }
531d1d26
MC
8205 if (flags)
8206 goto qstats_done;
8207
e37fed79
MC
8208 if (bp->fw_tx_stats_ext_size <=
8209 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8210 mutex_unlock(&bp->hwrm_cmd_lock);
8211 bp->pri2cos_valid = 0;
8212 return rc;
8213 }
8214
8215 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
8216 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8217
8218 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
8219 if (!rc) {
8220 struct hwrm_queue_pri2cos_qcfg_output *resp2;
8221 u8 *pri2cos;
8222 int i, j;
8223
8224 resp2 = bp->hwrm_cmd_resp_addr;
8225 pri2cos = &resp2->pri0_cos_queue_id;
8226 for (i = 0; i < 8; i++) {
8227 u8 queue_id = pri2cos[i];
a24ec322 8228 u8 queue_idx;
e37fed79 8229
a24ec322
MC
 8230 /* Per-port queue IDs start from 0, 10, 20, etc. */
8231 queue_idx = queue_id % 10;
8232 if (queue_idx > BNXT_MAX_QUEUE) {
8233 bp->pri2cos_valid = false;
8234 goto qstats_done;
8235 }
e37fed79
MC
8236 for (j = 0; j < bp->max_q; j++) {
8237 if (bp->q_ids[j] == queue_id)
a24ec322 8238 bp->pri2cos_idx[i] = queue_idx;
e37fed79
MC
8239 }
8240 }
8241 bp->pri2cos_valid = 1;
8242 }
a24ec322 8243qstats_done:
36e53349
MC
8244 mutex_unlock(&bp->hwrm_cmd_lock);
8245 return rc;
00db3cba
VV
8246}
8247
c0c050c5
MC
8248static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8249{
442a35a5 8250 if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
c0c050c5
MC
8251 bnxt_hwrm_tunnel_dst_port_free(
8252 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
442a35a5 8253 if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
c0c050c5
MC
8254 bnxt_hwrm_tunnel_dst_port_free(
8255 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
c0c050c5
MC
8256}
8257
8258static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8259{
8260 int rc, i;
8261 u32 tpa_flags = 0;
8262
8263 if (set_tpa)
8264 tpa_flags = bp->flags & BNXT_FLAG_TPA;
b340dc68 8265 else if (BNXT_NO_FW_ACCESS(bp))
b4fff207 8266 return 0;
c0c050c5
MC
8267 for (i = 0; i < bp->nr_vnics; i++) {
8268 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8269 if (rc) {
8270 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
23e12c89 8271 i, rc);
c0c050c5
MC
8272 return rc;
8273 }
8274 }
8275 return 0;
8276}
8277
8278static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8279{
8280 int i;
8281
8282 for (i = 0; i < bp->nr_vnics; i++)
8283 bnxt_hwrm_vnic_set_rss(bp, i, false);
8284}
8285
a46ecb11 8286static void bnxt_clear_vnic(struct bnxt *bp)
c0c050c5 8287{
a46ecb11
MC
8288 if (!bp->vnic_info)
8289 return;
8290
8291 bnxt_hwrm_clear_vnic_filter(bp);
8292 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
c0c050c5
MC
 8293 /* clear all RSS settings before freeing the vnic ctx */
8294 bnxt_hwrm_clear_vnic_rss(bp);
8295 bnxt_hwrm_vnic_ctx_free(bp);
c0c050c5 8296 }
a46ecb11
MC
 8297 /* before freeing the vnic, undo the vnic TPA settings */
8298 if (bp->flags & BNXT_FLAG_TPA)
8299 bnxt_set_tpa(bp, false);
8300 bnxt_hwrm_vnic_free(bp);
8301 if (bp->flags & BNXT_FLAG_CHIP_P5)
8302 bnxt_hwrm_vnic_ctx_free(bp);
8303}
8304
8305static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8306 bool irq_re_init)
8307{
8308 bnxt_clear_vnic(bp);
c0c050c5
MC
8309 bnxt_hwrm_ring_free(bp, close_path);
8310 bnxt_hwrm_ring_grp_free(bp);
8311 if (irq_re_init) {
8312 bnxt_hwrm_stat_ctx_free(bp);
8313 bnxt_hwrm_free_tunnel_ports(bp);
8314 }
8315}
8316
39d8ba2e
MC
8317static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8318{
8319 struct hwrm_func_cfg_input req = {0};
39d8ba2e
MC
8320
8321 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8322 req.fid = cpu_to_le16(0xffff);
8323 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8324 if (br_mode == BRIDGE_MODE_VEB)
8325 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8326 else if (br_mode == BRIDGE_MODE_VEPA)
8327 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8328 else
8329 return -EINVAL;
9f90445c 8330 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
39d8ba2e
MC
8331}
8332
c3480a60
MC
8333static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8334{
8335 struct hwrm_func_cfg_input req = {0};
c3480a60
MC
8336
8337 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8338 return 0;
8339
8340 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8341 req.fid = cpu_to_le16(0xffff);
8342 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
d4f52de0 8343 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
c3480a60 8344 if (size == 128)
d4f52de0 8345 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
c3480a60 8346
9f90445c 8347 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
c3480a60
MC
8348}
8349
7b3af4f7 8350static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
c0c050c5 8351{
ae10ae74 8352 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
c0c050c5
MC
8353 int rc;
8354
ae10ae74
MC
8355 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8356 goto skip_rss_ctx;
8357
c0c050c5 8358 /* allocate context for vnic */
94ce9caa 8359 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
c0c050c5
MC
8360 if (rc) {
8361 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8362 vnic_id, rc);
8363 goto vnic_setup_err;
8364 }
8365 bp->rsscos_nr_ctxs++;
8366
94ce9caa
PS
8367 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8368 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8369 if (rc) {
8370 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8371 vnic_id, rc);
8372 goto vnic_setup_err;
8373 }
8374 bp->rsscos_nr_ctxs++;
8375 }
8376
ae10ae74 8377skip_rss_ctx:
c0c050c5
MC
8378 /* configure default vnic, ring grp */
8379 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8380 if (rc) {
8381 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8382 vnic_id, rc);
8383 goto vnic_setup_err;
8384 }
8385
8386 /* Enable RSS hashing on vnic */
8387 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8388 if (rc) {
8389 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8390 vnic_id, rc);
8391 goto vnic_setup_err;
8392 }
8393
8394 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8395 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8396 if (rc) {
8397 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8398 vnic_id, rc);
8399 }
8400 }
8401
8402vnic_setup_err:
8403 return rc;
8404}
8405
7b3af4f7
MC
8406static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8407{
8408 int rc, i, nr_ctxs;
8409
f9f6a3fb 8410 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
7b3af4f7
MC
8411 for (i = 0; i < nr_ctxs; i++) {
8412 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8413 if (rc) {
8414 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8415 vnic_id, i, rc);
8416 break;
8417 }
8418 bp->rsscos_nr_ctxs++;
8419 }
8420 if (i < nr_ctxs)
8421 return -ENOMEM;
8422
8423 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8424 if (rc) {
8425 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8426 vnic_id, rc);
8427 return rc;
8428 }
8429 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8430 if (rc) {
8431 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8432 vnic_id, rc);
8433 return rc;
8434 }
8435 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8436 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8437 if (rc) {
8438 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8439 vnic_id, rc);
8440 }
8441 }
8442 return rc;
8443}
8444
8445static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8446{
8447 if (bp->flags & BNXT_FLAG_CHIP_P5)
8448 return __bnxt_setup_vnic_p5(bp, vnic_id);
8449 else
8450 return __bnxt_setup_vnic(bp, vnic_id);
8451}
8452
c0c050c5
MC
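/* For aRFS on older (pre-P5) chips, allocate one extra vnic per rx ring
 * (vnic ids 1..rx_nr_rings) so that ntuple filters can steer flows to
 * individual rings; P5 chips do not need these and return early.
 */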
8453static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8454{
8455#ifdef CONFIG_RFS_ACCEL
8456 int i, rc = 0;
8457
9b3d15e6
MC
8458 if (bp->flags & BNXT_FLAG_CHIP_P5)
8459 return 0;
8460
c0c050c5 8461 for (i = 0; i < bp->rx_nr_rings; i++) {
ae10ae74 8462 struct bnxt_vnic_info *vnic;
c0c050c5
MC
8463 u16 vnic_id = i + 1;
8464 u16 ring_id = i;
8465
8466 if (vnic_id >= bp->nr_vnics)
8467 break;
8468
ae10ae74
MC
8469 vnic = &bp->vnic_info[vnic_id];
8470 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8471 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8472 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
b81a90d3 8473 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
c0c050c5
MC
8474 if (rc) {
8475 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8476 vnic_id, rc);
8477 break;
8478 }
8479 rc = bnxt_setup_vnic(bp, vnic_id);
8480 if (rc)
8481 break;
8482 }
8483 return rc;
8484#else
8485 return 0;
8486#endif
8487}
8488
dd85fc0a 8489/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
17c71ac3
MC
8490static bool bnxt_promisc_ok(struct bnxt *bp)
8491{
8492#ifdef CONFIG_BNXT_SRIOV
dd85fc0a 8493 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
17c71ac3
MC
8494 return false;
8495#endif
8496 return true;
8497}
8498
dc52c6c7
PS
8499static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8500{
8501 unsigned int rc = 0;
8502
8503 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8504 if (rc) {
8505 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8506 rc);
8507 return rc;
8508 }
8509
8510 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8511 if (rc) {
8512 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
8513 rc);
8514 return rc;
8515 }
8516 return rc;
8517}
8518
b664f008 8519static int bnxt_cfg_rx_mode(struct bnxt *);
7d2837dd 8520static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
b664f008 8521
c0c050c5
MC
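/* Bring the chip to an operational state: allocate stat contexts and
 * rings, set up the default vnic (vnic 0) and any aRFS vnics, enable
 * TPA if configured, program the default MAC filter and rx mode, and
 * set interrupt coalescing.  On failure, all firmware resources
 * allocated here are freed again via bnxt_hwrm_resource_free().
 */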
8522static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8523{
7d2837dd 8524 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
c0c050c5 8525 int rc = 0;
76595193 8526 unsigned int rx_nr_rings = bp->rx_nr_rings;
c0c050c5
MC
8527
8528 if (irq_re_init) {
8529 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8530 if (rc) {
8531 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8532 rc);
8533 goto err_out;
8534 }
8535 }
8536
8537 rc = bnxt_hwrm_ring_alloc(bp);
8538 if (rc) {
8539 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8540 goto err_out;
8541 }
8542
8543 rc = bnxt_hwrm_ring_grp_alloc(bp);
8544 if (rc) {
8545 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8546 goto err_out;
8547 }
8548
76595193
PS
8549 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8550 rx_nr_rings--;
8551
c0c050c5 8552 /* default vnic 0 */
76595193 8553 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
c0c050c5
MC
8554 if (rc) {
8555 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8556 goto err_out;
8557 }
8558
8559 rc = bnxt_setup_vnic(bp, 0);
8560 if (rc)
8561 goto err_out;
8562
8563 if (bp->flags & BNXT_FLAG_RFS) {
8564 rc = bnxt_alloc_rfs_vnics(bp);
8565 if (rc)
8566 goto err_out;
8567 }
8568
8569 if (bp->flags & BNXT_FLAG_TPA) {
8570 rc = bnxt_set_tpa(bp, true);
8571 if (rc)
8572 goto err_out;
8573 }
8574
8575 if (BNXT_VF(bp))
8576 bnxt_update_vf_mac(bp);
8577
8578 /* Filter for default vnic 0 */
8579 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8580 if (rc) {
8581 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8582 goto err_out;
8583 }
7d2837dd 8584 vnic->uc_filter_count = 1;
c0c050c5 8585
30e33848
MC
8586 vnic->rx_mask = 0;
8587 if (bp->dev->flags & IFF_BROADCAST)
8588 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5 8589
dd85fc0a 8590 if (bp->dev->flags & IFF_PROMISC)
7d2837dd
MC
8591 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8592
8593 if (bp->dev->flags & IFF_ALLMULTI) {
8594 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8595 vnic->mc_list_count = 0;
8596 } else {
8597 u32 mask = 0;
8598
8599 bnxt_mc_list_updated(bp, &mask);
8600 vnic->rx_mask |= mask;
8601 }
c0c050c5 8602
b664f008
MC
8603 rc = bnxt_cfg_rx_mode(bp);
8604 if (rc)
c0c050c5 8605 goto err_out;
c0c050c5
MC
8606
8607 rc = bnxt_hwrm_set_coal(bp);
8608 if (rc)
8609 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
dc52c6c7
PS
8610 rc);
8611
8612 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8613 rc = bnxt_setup_nitroa0_vnic(bp);
8614 if (rc)
8615 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8616 rc);
8617 }
c0c050c5 8618
cf6645f8
MC
8619 if (BNXT_VF(bp)) {
8620 bnxt_hwrm_func_qcfg(bp);
8621 netdev_update_features(bp->dev);
8622 }
8623
c0c050c5
MC
8624 return 0;
8625
8626err_out:
8627 bnxt_hwrm_resource_free(bp, 0, true);
8628
8629 return rc;
8630}
8631
8632static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8633{
8634 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8635 return 0;
8636}
8637
8638static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8639{
2247925f 8640 bnxt_init_cp_rings(bp);
c0c050c5
MC
8641 bnxt_init_rx_rings(bp);
8642 bnxt_init_tx_rings(bp);
8643 bnxt_init_ring_grps(bp, irq_re_init);
8644 bnxt_init_vnics(bp);
8645
8646 return bnxt_init_chip(bp, irq_re_init);
8647}
8648
c0c050c5
MC
8649static int bnxt_set_real_num_queues(struct bnxt *bp)
8650{
8651 int rc;
8652 struct net_device *dev = bp->dev;
8653
5f449249
MC
8654 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8655 bp->tx_nr_rings_xdp);
c0c050c5
MC
8656 if (rc)
8657 return rc;
8658
8659 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8660 if (rc)
8661 return rc;
8662
8663#ifdef CONFIG_RFS_ACCEL
45019a18 8664 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 8665 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
8666#endif
8667
8668 return rc;
8669}
8670
6e6c5a57
MC
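/* Trim *rx and *tx so that the rings fit within @max completion rings.
 * With shared rings each completion ring serves one rx and one tx ring,
 * so both counts are simply capped at @max.  Otherwise rx and tx are
 * reduced alternately (larger count first) until rx + tx <= max;
 * e.g. *rx = 8, *tx = 8, max = 12 trims to rx = 6, tx = 6.
 */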
8671static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8672 bool shared)
8673{
8674 int _rx = *rx, _tx = *tx;
8675
8676 if (shared) {
8677 *rx = min_t(int, _rx, max);
8678 *tx = min_t(int, _tx, max);
8679 } else {
8680 if (max < 2)
8681 return -ENOMEM;
8682
8683 while (_rx + _tx > max) {
8684 if (_rx > _tx && _rx > 1)
8685 _rx--;
8686 else if (_tx > 1)
8687 _tx--;
8688 }
8689 *rx = _rx;
8690 *tx = _tx;
8691 }
8692 return 0;
8693}
8694
7809592d
MC
8695static void bnxt_setup_msix(struct bnxt *bp)
8696{
8697 const int len = sizeof(bp->irq_tbl[0].name);
8698 struct net_device *dev = bp->dev;
8699 int tcs, i;
8700
8701 tcs = netdev_get_num_tc(dev);
18e4960c 8702 if (tcs) {
d1e7925e 8703 int i, off, count;
7809592d 8704
d1e7925e
MC
8705 for (i = 0; i < tcs; i++) {
8706 count = bp->tx_nr_rings_per_tc;
8707 off = i * count;
8708 netdev_set_tc_queue(dev, i, count, off);
7809592d
MC
8709 }
8710 }
8711
8712 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c 8713 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7809592d
MC
8714 char *attr;
8715
8716 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8717 attr = "TxRx";
8718 else if (i < bp->rx_nr_rings)
8719 attr = "rx";
8720 else
8721 attr = "tx";
8722
e5811b8c
MC
8723 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8724 attr, i);
8725 bp->irq_tbl[map_idx].handler = bnxt_msix;
7809592d
MC
8726 }
8727}
8728
8729static void bnxt_setup_inta(struct bnxt *bp)
8730{
8731 const int len = sizeof(bp->irq_tbl[0].name);
8732
8733 if (netdev_get_num_tc(bp->dev))
8734 netdev_reset_tc(bp->dev);
8735
8736 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8737 0);
8738 bp->irq_tbl[0].handler = bnxt_inta;
8739}
8740
20d7d1c5
EP
8741static int bnxt_init_int_mode(struct bnxt *bp);
8742
7809592d
MC
8743static int bnxt_setup_int_mode(struct bnxt *bp)
8744{
8745 int rc;
8746
20d7d1c5
EP
8747 if (!bp->irq_tbl) {
8748 rc = bnxt_init_int_mode(bp);
8749 if (rc || !bp->irq_tbl)
8750 return rc ?: -ENODEV;
8751 }
8752
7809592d
MC
8753 if (bp->flags & BNXT_FLAG_USING_MSIX)
8754 bnxt_setup_msix(bp);
8755 else
8756 bnxt_setup_inta(bp);
8757
8758 rc = bnxt_set_real_num_queues(bp);
8759 return rc;
8760}
8761
b7429954 8762#ifdef CONFIG_RFS_ACCEL
8079e8f1
MC
8763static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8764{
6a4f2947 8765 return bp->hw_resc.max_rsscos_ctxs;
8079e8f1
MC
8766}
8767
8768static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8769{
6a4f2947 8770 return bp->hw_resc.max_vnics;
8079e8f1 8771}
b7429954 8772#endif
8079e8f1 8773
e4060d30
MC
8774unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8775{
6a4f2947 8776 return bp->hw_resc.max_stat_ctxs;
e4060d30
MC
8777}
8778
8779unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8780{
6a4f2947 8781 return bp->hw_resc.max_cp_rings;
e4060d30
MC
8782}
8783
e916b081 8784static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
a588e458 8785{
c0b8cda0
MC
8786 unsigned int cp = bp->hw_resc.max_cp_rings;
8787
8788 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8789 cp -= bnxt_get_ulp_msix_num(bp);
8790
8791 return cp;
a588e458
MC
8792}
8793
ad95c27b 8794static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7809592d 8795{
6a4f2947
MC
8796 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8797
f7588cd8
MC
8798 if (bp->flags & BNXT_FLAG_CHIP_P5)
8799 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8800
6a4f2947 8801 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7809592d
MC
8802}
8803
30f52947 8804static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
33c2657e 8805{
6a4f2947 8806 bp->hw_resc.max_irqs = max_irqs;
33c2657e
MC
8807}
8808
e916b081
MC
8809unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8810{
8811 unsigned int cp;
8812
8813 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8814 if (bp->flags & BNXT_FLAG_CHIP_P5)
8815 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8816 else
8817 return cp - bp->cp_nr_rings;
8818}
8819
c027c6b4
VV
8820unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8821{
d77b1ad8 8822 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
c027c6b4
VV
8823}
8824
fbcfc8e4
MC
8825int bnxt_get_avail_msix(struct bnxt *bp, int num)
8826{
8827 int max_cp = bnxt_get_max_func_cp_rings(bp);
8828 int max_irq = bnxt_get_max_func_irqs(bp);
8829 int total_req = bp->cp_nr_rings + num;
8830 int max_idx, avail_msix;
8831
75720e63
MC
8832 max_idx = bp->total_irqs;
8833 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8834 max_idx = min_t(int, bp->total_irqs, max_cp);
fbcfc8e4 8835 avail_msix = max_idx - bp->cp_nr_rings;
f1ca94de 8836 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
fbcfc8e4
MC
8837 return avail_msix;
8838
8839 if (max_irq < total_req) {
8840 num = max_irq - bp->cp_nr_rings;
8841 if (num <= 0)
8842 return 0;
8843 }
8844 return num;
8845}
8846
08654eb2
MC
8847static int bnxt_get_num_msix(struct bnxt *bp)
8848{
f1ca94de 8849 if (!BNXT_NEW_RM(bp))
08654eb2
MC
8850 return bnxt_get_max_func_irqs(bp);
8851
c0b8cda0 8852 return bnxt_nq_rings_in_use(bp);
08654eb2
MC
8853}
8854
7809592d 8855static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 8856{
fbcfc8e4 8857 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7809592d 8858 struct msix_entry *msix_ent;
c0c050c5 8859
08654eb2
MC
8860 total_vecs = bnxt_get_num_msix(bp);
8861 max = bnxt_get_max_func_irqs(bp);
8862 if (total_vecs > max)
8863 total_vecs = max;
8864
2773dfb2
MC
8865 if (!total_vecs)
8866 return 0;
8867
c0c050c5
MC
8868 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8869 if (!msix_ent)
8870 return -ENOMEM;
8871
8872 for (i = 0; i < total_vecs; i++) {
8873 msix_ent[i].entry = i;
8874 msix_ent[i].vector = 0;
8875 }
8876
01657bcd
MC
8877 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8878 min = 2;
8879
8880 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
fbcfc8e4
MC
8881 ulp_msix = bnxt_get_ulp_msix_num(bp);
8882 if (total_vecs < 0 || total_vecs < ulp_msix) {
c0c050c5
MC
8883 rc = -ENODEV;
8884 goto msix_setup_exit;
8885 }
8886
8887 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8888 if (bp->irq_tbl) {
7809592d
MC
8889 for (i = 0; i < total_vecs; i++)
8890 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 8891
7809592d 8892 bp->total_irqs = total_vecs;
c0c050c5 8893 /* Trim rings based upon num of vectors allocated */
6e6c5a57 8894 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
fbcfc8e4 8895 total_vecs - ulp_msix, min == 1);
6e6c5a57
MC
8896 if (rc)
8897 goto msix_setup_exit;
8898
7809592d
MC
8899 bp->cp_nr_rings = (min == 1) ?
8900 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8901 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 8902
c0c050c5
MC
8903 } else {
8904 rc = -ENOMEM;
8905 goto msix_setup_exit;
8906 }
8907 bp->flags |= BNXT_FLAG_USING_MSIX;
8908 kfree(msix_ent);
8909 return 0;
8910
8911msix_setup_exit:
7809592d
MC
8912 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8913 kfree(bp->irq_tbl);
8914 bp->irq_tbl = NULL;
c0c050c5
MC
8915 pci_disable_msix(bp->pdev);
8916 kfree(msix_ent);
8917 return rc;
8918}
8919
7809592d 8920static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 8921{
33dbcf60 8922 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
8923 if (!bp->irq_tbl)
8924 return -ENOMEM;
8925
8926 bp->total_irqs = 1;
c0c050c5
MC
8927 bp->rx_nr_rings = 1;
8928 bp->tx_nr_rings = 1;
8929 bp->cp_nr_rings = 1;
01657bcd 8930 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 8931 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 8932 return 0;
c0c050c5
MC
8933}
8934
7809592d 8935static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5 8936{
20d7d1c5 8937 int rc = -ENODEV;
c0c050c5
MC
8938
8939 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 8940 rc = bnxt_init_msix(bp);
c0c050c5 8941
1fa72e29 8942 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 8943 /* fallback to INTA */
7809592d 8944 rc = bnxt_init_inta(bp);
c0c050c5
MC
8945 }
8946 return rc;
8947}
8948
7809592d
MC
8949static void bnxt_clear_int_mode(struct bnxt *bp)
8950{
8951 if (bp->flags & BNXT_FLAG_USING_MSIX)
8952 pci_disable_msix(bp->pdev);
8953
8954 kfree(bp->irq_tbl);
8955 bp->irq_tbl = NULL;
8956 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8957}
8958
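/* Re-reserve rings with the firmware when the current reservation is
 * stale.  If the required MSI-X vector count has changed, interrupts
 * are torn down and re-initialized around the reservation, with ULP
 * interrupts stopped while this happens.
 */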
1b3f0b75 8959int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
674f50a5 8960{
674f50a5 8961 int tcs = netdev_get_num_tc(bp->dev);
1b3f0b75 8962 bool irq_cleared = false;
674f50a5
MC
8963 int rc;
8964
8965 if (!bnxt_need_reserve_rings(bp))
8966 return 0;
8967
1b3f0b75
MC
8968 if (irq_re_init && BNXT_NEW_RM(bp) &&
8969 bnxt_get_num_msix(bp) != bp->total_irqs) {
ec86f14e 8970 bnxt_ulp_irq_stop(bp);
674f50a5 8971 bnxt_clear_int_mode(bp);
1b3f0b75 8972 irq_cleared = true;
36d65be9
MC
8973 }
8974 rc = __bnxt_reserve_rings(bp);
1b3f0b75 8975 if (irq_cleared) {
36d65be9
MC
8976 if (!rc)
8977 rc = bnxt_init_int_mode(bp);
ec86f14e 8978 bnxt_ulp_irq_restart(bp, rc);
36d65be9
MC
8979 }
8980 if (rc) {
8981 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8982 return rc;
674f50a5
MC
8983 }
8984 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8985 netdev_err(bp->dev, "tx ring reservation failure\n");
8986 netdev_reset_tc(bp->dev);
8987 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8988 return -ENOMEM;
8989 }
674f50a5
MC
8990 return 0;
8991}
8992
c0c050c5
MC
8993static void bnxt_free_irq(struct bnxt *bp)
8994{
8995 struct bnxt_irq *irq;
8996 int i;
8997
8998#ifdef CONFIG_RFS_ACCEL
8999 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9000 bp->dev->rx_cpu_rmap = NULL;
9001#endif
cb98526b 9002 if (!bp->irq_tbl || !bp->bnapi)
c0c050c5
MC
9003 return;
9004
9005 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
9006 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9007
9008 irq = &bp->irq_tbl[map_idx];
56f0fd80
VV
9009 if (irq->requested) {
9010 if (irq->have_cpumask) {
9011 irq_set_affinity_hint(irq->vector, NULL);
9012 free_cpumask_var(irq->cpu_mask);
9013 irq->have_cpumask = 0;
9014 }
c0c050c5 9015 free_irq(irq->vector, bp->bnapi[i]);
56f0fd80
VV
9016 }
9017
c0c050c5
MC
9018 irq->requested = 0;
9019 }
c0c050c5
MC
9020}
9021
9022static int bnxt_request_irq(struct bnxt *bp)
9023{
b81a90d3 9024 int i, j, rc = 0;
c0c050c5
MC
9025 unsigned long flags = 0;
9026#ifdef CONFIG_RFS_ACCEL
e5811b8c 9027 struct cpu_rmap *rmap;
c0c050c5
MC
9028#endif
9029
e5811b8c
MC
9030 rc = bnxt_setup_int_mode(bp);
9031 if (rc) {
9032 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9033 rc);
9034 return rc;
9035 }
9036#ifdef CONFIG_RFS_ACCEL
9037 rmap = bp->dev->rx_cpu_rmap;
9038#endif
c0c050c5
MC
9039 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9040 flags = IRQF_SHARED;
9041
b81a90d3 9042 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
9043 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9044 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9045
c0c050c5 9046#ifdef CONFIG_RFS_ACCEL
b81a90d3 9047 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
9048 rc = irq_cpu_rmap_add(rmap, irq->vector);
9049 if (rc)
9050 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
b81a90d3
MC
9051 j);
9052 j++;
c0c050c5
MC
9053 }
9054#endif
9055 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9056 bp->bnapi[i]);
9057 if (rc)
9058 break;
9059
9060 irq->requested = 1;
56f0fd80
VV
9061
9062 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9063 int numa_node = dev_to_node(&bp->pdev->dev);
9064
9065 irq->have_cpumask = 1;
9066 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9067 irq->cpu_mask);
9068 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9069 if (rc) {
9070 netdev_warn(bp->dev,
9071 "Set affinity failed, IRQ = %d\n",
9072 irq->vector);
9073 break;
9074 }
9075 }
c0c050c5
MC
9076 }
9077 return rc;
9078}
9079
9080static void bnxt_del_napi(struct bnxt *bp)
9081{
9082 int i;
9083
9084 if (!bp->bnapi)
9085 return;
9086
9087 for (i = 0; i < bp->cp_nr_rings; i++) {
9088 struct bnxt_napi *bnapi = bp->bnapi[i];
9089
5198d545 9090 __netif_napi_del(&bnapi->napi);
c0c050c5 9091 }
5198d545 9092 /* We called __netif_napi_del(), we need
e5f6f564
ED
9093 * to respect an RCU grace period before freeing napi structures.
9094 */
9095 synchronize_net();
c0c050c5
MC
9096}
9097
9098static void bnxt_init_napi(struct bnxt *bp)
9099{
9100 int i;
10bbdaf5 9101 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
9102 struct bnxt_napi *bnapi;
9103
9104 if (bp->flags & BNXT_FLAG_USING_MSIX) {
0fcec985
MC
9105 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9106
9107 if (bp->flags & BNXT_FLAG_CHIP_P5)
9108 poll_fn = bnxt_poll_p5;
9109 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10bbdaf5
PS
9110 cp_nr_rings--;
9111 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5 9112 bnapi = bp->bnapi[i];
0fcec985 9113 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
c0c050c5 9114 }
10bbdaf5
PS
9115 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9116 bnapi = bp->bnapi[cp_nr_rings];
9117 netif_napi_add(bp->dev, &bnapi->napi,
9118 bnxt_poll_nitroa0, 64);
10bbdaf5 9119 }
c0c050c5
MC
9120 } else {
9121 bnapi = bp->bnapi[0];
9122 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
c0c050c5
MC
9123 }
9124}
9125
9126static void bnxt_disable_napi(struct bnxt *bp)
9127{
9128 int i;
9129
e340a5c4
MC
9130 if (!bp->bnapi ||
9131 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
c0c050c5
MC
9132 return;
9133
0bc0b97f
AG
9134 for (i = 0; i < bp->cp_nr_rings; i++) {
9135 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9136
9137 if (bp->bnapi[i]->rx_ring)
9138 cancel_work_sync(&cpr->dim.work);
9139
c0c050c5 9140 napi_disable(&bp->bnapi[i]->napi);
0bc0b97f 9141 }
c0c050c5
MC
9142}
9143
9144static void bnxt_enable_napi(struct bnxt *bp)
9145{
9146 int i;
9147
e340a5c4 9148 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
c0c050c5 9149 for (i = 0; i < bp->cp_nr_rings; i++) {
8a27d4b9
MC
9150 struct bnxt_napi *bnapi = bp->bnapi[i];
9151 struct bnxt_cp_ring_info *cpr;
9152
9153 cpr = &bnapi->cp_ring;
9154 if (bnapi->in_reset)
9155 cpr->sw_stats.rx.rx_resets++;
9156 bnapi->in_reset = false;
6a8788f2 9157
8a27d4b9 9158 if (bnapi->rx_ring) {
6a8788f2 9159 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
c002bd52 9160 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6a8788f2 9161 }
8a27d4b9 9162 napi_enable(&bnapi->napi);
c0c050c5
MC
9163 }
9164}
9165
7df4ae9f 9166void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
9167{
9168 int i;
c0c050c5 9169 struct bnxt_tx_ring_info *txr;
c0c050c5 9170
b6ab4b01 9171 if (bp->tx_ring) {
c0c050c5 9172 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 9173 txr = &bp->tx_ring[i];
3c603136 9174 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
c0c050c5
MC
9175 }
9176 }
3c603136
JK
9177 /* Make sure napi polls see @dev_state change */
9178 synchronize_net();
132e0b65
EP
9179 /* Drop carrier first to prevent TX timeout */
9180 netif_carrier_off(bp->dev);
c0c050c5
MC
9181 /* Stop all TX queues */
9182 netif_tx_disable(bp->dev);
c0c050c5
MC
9183}
9184
7df4ae9f 9185void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
9186{
9187 int i;
c0c050c5 9188 struct bnxt_tx_ring_info *txr;
c0c050c5
MC
9189
9190 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 9191 txr = &bp->tx_ring[i];
3c603136 9192 WRITE_ONCE(txr->dev_state, 0);
c0c050c5 9193 }
3c603136
JK
9194 /* Make sure napi polls see @dev_state change */
9195 synchronize_net();
c0c050c5
MC
9196 netif_tx_wake_all_queues(bp->dev);
9197 if (bp->link_info.link_up)
9198 netif_carrier_on(bp->dev);
9199}
9200
2046e3c3
MC
9201static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9202{
9203 u8 active_fec = link_info->active_fec_sig_mode &
9204 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9205
9206 switch (active_fec) {
9207 default:
9208 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9209 return "None";
9210 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9211 return "Clause 74 BaseR";
9212 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9213 return "Clause 91 RS(528,514)";
9214 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9215 return "Clause 91 RS544_1XN";
9216 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9217 return "Clause 91 RS(544,514)";
9218 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9219 return "Clause 91 RS272_1XN";
9220 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9221 return "Clause 91 RS(272,257)";
9222 }
9223}
9224
c0c050c5
MC
9225static void bnxt_report_link(struct bnxt *bp)
9226{
9227 if (bp->link_info.link_up) {
1d2deb61 9228 const char *signal = "";
c0c050c5 9229 const char *flow_ctrl;
1d2deb61 9230 const char *duplex;
38a21b34
DK
9231 u32 speed;
9232 u16 fec;
c0c050c5
MC
9233
9234 netif_carrier_on(bp->dev);
8eddb3e7
MC
9235 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9236 if (speed == SPEED_UNKNOWN) {
9237 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9238 return;
9239 }
c0c050c5
MC
9240 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9241 duplex = "full";
9242 else
9243 duplex = "half";
9244 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9245 flow_ctrl = "ON - receive & transmit";
9246 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9247 flow_ctrl = "ON - transmit";
9248 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9249 flow_ctrl = "ON - receive";
9250 else
9251 flow_ctrl = "none";
1d2deb61
EP
9252 if (bp->link_info.phy_qcfg_resp.option_flags &
9253 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9254 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9255 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9256 switch (sig_mode) {
9257 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9258 signal = "(NRZ) ";
9259 break;
9260 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9261 signal = "(PAM4) ";
9262 break;
9263 default:
9264 break;
9265 }
9266 }
9267 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9268 speed, signal, duplex, flow_ctrl);
b0d28207 9269 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
170ce013
MC
9270 netdev_info(bp->dev, "EEE is %s\n",
9271 bp->eee.eee_active ? "active" :
9272 "not active");
e70c752f
MC
9273 fec = bp->link_info.fec_cfg;
9274 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
2046e3c3 9275 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
e70c752f 9276 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
2046e3c3 9277 bnxt_report_fec(&bp->link_info));
c0c050c5
MC
9278 } else {
9279 netif_carrier_off(bp->dev);
9280 netdev_err(bp->dev, "NIC Link is Down\n");
9281 }
9282}
9283
3128e811
MC
9284static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9285{
9286 if (!resp->supported_speeds_auto_mode &&
9287 !resp->supported_speeds_force_mode &&
9288 !resp->supported_pam4_speeds_auto_mode &&
9289 !resp->supported_pam4_speeds_force_mode)
9290 return true;
9291 return false;
9292}
9293
170ce013
MC
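/* Query PHY capabilities from the firmware: EEE support and LPI timers,
 * supported NRZ and PAM4 autoneg speeds, port count, and whether the
 * Ethernet link has been administratively disabled (no speeds at all).
 */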
9294static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9295{
9296 int rc = 0;
9297 struct hwrm_port_phy_qcaps_input req = {0};
9298 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
93ed8117 9299 struct bnxt_link_info *link_info = &bp->link_info;
170ce013
MC
9300
9301 if (bp->hwrm_spec_code < 0x10201)
9302 return 0;
9303
9304 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
9305
9306 mutex_lock(&bp->hwrm_cmd_lock);
9307 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9308 if (rc)
9309 goto hwrm_phy_qcaps_exit;
9310
b0d28207 9311 bp->phy_flags = resp->flags;
acb20054 9312 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
170ce013
MC
9313 struct ethtool_eee *eee = &bp->eee;
9314 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9315
170ce013
MC
9316 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9317 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9318 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9319 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9320 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9321 }
fea6b333 9322
3128e811
MC
9323 if (bp->hwrm_spec_code >= 0x10a01) {
9324 if (bnxt_phy_qcaps_no_speed(resp)) {
9325 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9326 netdev_warn(bp->dev, "Ethernet link disabled\n");
9327 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9328 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9329 netdev_info(bp->dev, "Ethernet link enabled\n");
9330 /* Phy re-enabled, reprobe the speeds */
9331 link_info->support_auto_speeds = 0;
9332 link_info->support_pam4_auto_speeds = 0;
9333 }
9334 }
520ad89a
MC
9335 if (resp->supported_speeds_auto_mode)
9336 link_info->support_auto_speeds =
9337 le16_to_cpu(resp->supported_speeds_auto_mode);
d058426e
EP
9338 if (resp->supported_pam4_speeds_auto_mode)
9339 link_info->support_pam4_auto_speeds =
9340 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
170ce013 9341
d5430d31
MC
9342 bp->port_count = resp->port_cnt;
9343
170ce013
MC
9344hwrm_phy_qcaps_exit:
9345 mutex_unlock(&bp->hwrm_cmd_lock);
9346 return rc;
9347}
9348
c916062a
EP
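/* Return true if @advertising contains any speed bit that is not in
 * @supported, i.e. a previously advertised speed has been dropped.
 * Example: advertising = 0x6, supported = 0x2 -> diff = 0x4 and
 * (supported | diff) != supported, so the function returns true.
 */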
9349static bool bnxt_support_dropped(u16 advertising, u16 supported)
9350{
9351 u16 diff = advertising ^ supported;
9352
9353 return ((supported | diff) != supported);
9354}
9355
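/* Query the current PHY configuration (HWRM_PORT_PHY_QCFG) and refresh
 * the cached link, pause, speed and EEE state.  When @chng_link_state
 * is set, report link up/down transitions; if any advertised speeds
 * are no longer supported, renegotiate with the remaining ones.
 */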
ccd6a9dc 9356int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
c0c050c5
MC
9357{
9358 int rc = 0;
9359 struct bnxt_link_info *link_info = &bp->link_info;
9360 struct hwrm_port_phy_qcfg_input req = {0};
9361 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9362 u8 link_up = link_info->link_up;
d058426e 9363 bool support_changed = false;
c0c050c5
MC
9364
9365 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
9366
9367 mutex_lock(&bp->hwrm_cmd_lock);
9368 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9369 if (rc) {
9370 mutex_unlock(&bp->hwrm_cmd_lock);
9371 return rc;
9372 }
9373
9374 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9375 link_info->phy_link_status = resp->link;
acb20054
MC
9376 link_info->duplex = resp->duplex_cfg;
9377 if (bp->hwrm_spec_code >= 0x10800)
9378 link_info->duplex = resp->duplex_state;
c0c050c5
MC
9379 link_info->pause = resp->pause;
9380 link_info->auto_mode = resp->auto_mode;
9381 link_info->auto_pause_setting = resp->auto_pause;
3277360e 9382 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 9383 link_info->force_pause_setting = resp->force_pause;
acb20054 9384 link_info->duplex_setting = resp->duplex_cfg;
c0c050c5
MC
9385 if (link_info->phy_link_status == BNXT_LINK_LINK)
9386 link_info->link_speed = le16_to_cpu(resp->link_speed);
9387 else
9388 link_info->link_speed = 0;
9389 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
d058426e
EP
9390 link_info->force_pam4_link_speed =
9391 le16_to_cpu(resp->force_pam4_link_speed);
c0c050c5 9392 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
d058426e 9393 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
c0c050c5 9394 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
d058426e
EP
9395 link_info->auto_pam4_link_speeds =
9396 le16_to_cpu(resp->auto_pam4_link_speed_mask);
3277360e
MC
9397 link_info->lp_auto_link_speeds =
9398 le16_to_cpu(resp->link_partner_adv_speeds);
d058426e
EP
9399 link_info->lp_auto_pam4_link_speeds =
9400 resp->link_partner_pam4_adv_speeds;
c0c050c5
MC
9401 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9402 link_info->phy_ver[0] = resp->phy_maj;
9403 link_info->phy_ver[1] = resp->phy_min;
9404 link_info->phy_ver[2] = resp->phy_bld;
9405 link_info->media_type = resp->media_type;
03efbec0 9406 link_info->phy_type = resp->phy_type;
11f15ed3 9407 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
9408 link_info->phy_addr = resp->eee_config_phy_addr &
9409 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 9410 link_info->module_status = resp->module_status;
170ce013 9411
b0d28207 9412 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
170ce013
MC
9413 struct ethtool_eee *eee = &bp->eee;
9414 u16 fw_speeds;
9415
9416 eee->eee_active = 0;
9417 if (resp->eee_config_phy_addr &
9418 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9419 eee->eee_active = 1;
9420 fw_speeds = le16_to_cpu(
9421 resp->link_partner_adv_eee_link_speed_mask);
9422 eee->lp_advertised =
9423 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9424 }
9425
9426 /* Pull initial EEE config */
9427 if (!chng_link_state) {
9428 if (resp->eee_config_phy_addr &
9429 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9430 eee->eee_enabled = 1;
c0c050c5 9431
170ce013
MC
9432 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9433 eee->advertised =
9434 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9435
9436 if (resp->eee_config_phy_addr &
9437 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9438 __le32 tmr;
9439
9440 eee->tx_lpi_enabled = 1;
9441 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9442 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9443 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9444 }
9445 }
9446 }
e70c752f
MC
9447
9448 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8b277589 9449 if (bp->hwrm_spec_code >= 0x10504) {
e70c752f 9450 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8b277589
MC
9451 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9452 }
c0c050c5
MC
9453 /* TODO: need to add more logic to report VF link */
9454 if (chng_link_state) {
9455 if (link_info->phy_link_status == BNXT_LINK_LINK)
9456 link_info->link_up = 1;
9457 else
9458 link_info->link_up = 0;
9459 if (link_up != link_info->link_up)
9460 bnxt_report_link(bp);
9461 } else {
9462 /* always report link down if not required to update link state */
9463 link_info->link_up = 0;
9464 }
9465 mutex_unlock(&bp->hwrm_cmd_lock);
286ef9d6 9466
c7e457f4 9467 if (!BNXT_PHY_CFG_ABLE(bp))
dac04907
MC
9468 return 0;
9469
c916062a
EP
9470 /* Check if any advertised speeds are no longer supported. The caller
9471 * holds the link_lock mutex, so we can modify link_info settings.
9472 */
9473 if (bnxt_support_dropped(link_info->advertising,
9474 link_info->support_auto_speeds)) {
286ef9d6 9475 link_info->advertising = link_info->support_auto_speeds;
d058426e 9476 support_changed = true;
286ef9d6 9477 }
d058426e
EP
9478 if (bnxt_support_dropped(link_info->advertising_pam4,
9479 link_info->support_pam4_auto_speeds)) {
9480 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9481 support_changed = true;
9482 }
9483 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9484 bnxt_hwrm_set_link_setting(bp, true, false);
c0c050c5
MC
9485 return 0;
9486}
9487
10289bec
MC
9488static void bnxt_get_port_module_status(struct bnxt *bp)
9489{
9490 struct bnxt_link_info *link_info = &bp->link_info;
9491 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9492 u8 module_status;
9493
9494 if (bnxt_update_link(bp, true))
9495 return;
9496
9497 module_status = link_info->module_status;
9498 switch (module_status) {
9499 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9500 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9501 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9502 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9503 bp->pf.port_id);
9504 if (bp->hwrm_spec_code >= 0x10201) {
9505 netdev_warn(bp->dev, "Module part number %s\n",
9506 resp->phy_vendor_partnumber);
9507 }
9508 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9509 netdev_warn(bp->dev, "TX is disabled\n");
9510 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9511 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9512 }
9513}
9514
c0c050c5
MC
9515static void
9516bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9517{
9518 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
9519 if (bp->hwrm_spec_code >= 0x10201)
9520 req->auto_pause =
9521 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
9522 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9523 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9524 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 9525 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
9526 req->enables |=
9527 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9528 } else {
9529 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9530 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9531 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9532 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9533 req->enables |=
9534 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
9535 if (bp->hwrm_spec_code >= 0x10201) {
9536 req->auto_pause = req->force_pause;
9537 req->enables |= cpu_to_le32(
9538 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9539 }
c0c050c5
MC
9540 }
9541}
9542
d058426e 9543static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
c0c050c5 9544{
d058426e
EP
9545 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9546 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9547 if (bp->link_info.advertising) {
9548 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9549 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9550 }
9551 if (bp->link_info.advertising_pam4) {
9552 req->enables |=
9553 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9554 req->auto_link_pam4_speed_mask =
9555 cpu_to_le16(bp->link_info.advertising_pam4);
9556 }
c0c050c5 9557 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
d058426e 9558 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
c0c050c5 9559 } else {
c0c050c5 9560 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
d058426e
EP
9561 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9562 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9563 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9564 } else {
9565 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9566 }
c0c050c5
MC
9567 }
9568
c0c050c5
MC
9569 /* tell chimp that the setting takes effect immediately */
9570 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9571}
9572
9573int bnxt_hwrm_set_pause(struct bnxt *bp)
9574{
9575 struct hwrm_port_phy_cfg_input req = {0};
9576 int rc;
9577
9578 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9579 bnxt_hwrm_set_pause_common(bp, &req);
9580
9581 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9582 bp->link_info.force_link_chng)
9583 bnxt_hwrm_set_link_common(bp, &req);
9584
9585 mutex_lock(&bp->hwrm_cmd_lock);
9586 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9587 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9588 /* since changing the pause setting doesn't trigger any link
9589 * change event, the driver needs to update the current pause
9590 * result upon successful return of the phy_cfg command
9591 */
9592 bp->link_info.pause =
9593 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9594 bp->link_info.auto_pause_setting = 0;
9595 if (!bp->link_info.force_link_chng)
9596 bnxt_report_link(bp);
9597 }
9598 bp->link_info.force_link_chng = false;
9599 mutex_unlock(&bp->hwrm_cmd_lock);
9600 return rc;
9601}
9602
939f7f0c
MC
9603static void bnxt_hwrm_set_eee(struct bnxt *bp,
9604 struct hwrm_port_phy_cfg_input *req)
9605{
9606 struct ethtool_eee *eee = &bp->eee;
9607
9608 if (eee->eee_enabled) {
9609 u16 eee_speeds;
9610 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9611
9612 if (eee->tx_lpi_enabled)
9613 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9614 else
9615 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9616
9617 req->flags |= cpu_to_le32(flags);
9618 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9619 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9620 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9621 } else {
9622 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9623 }
9624}
9625
9626int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5
MC
9627{
9628 struct hwrm_port_phy_cfg_input req = {0};
9629
9630 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9631 if (set_pause)
9632 bnxt_hwrm_set_pause_common(bp, &req);
9633
9634 bnxt_hwrm_set_link_common(bp, &req);
939f7f0c
MC
9635
9636 if (set_eee)
9637 bnxt_hwrm_set_eee(bp, &req);
c0c050c5
MC
9638 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9639}
9640
33f7d55f
MC
9641static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9642{
9643 struct hwrm_port_phy_cfg_input req = {0};
9644
567b2abe 9645 if (!BNXT_SINGLE_PF(bp))
33f7d55f
MC
9646 return 0;
9647
d5ca9905
MC
9648 if (pci_num_vf(bp->pdev) &&
9649 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
33f7d55f
MC
9650 return 0;
9651
9652 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
16d663a6 9653 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
33f7d55f
MC
9654 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9655}
9656
ec5d31e3
MC
9657static int bnxt_fw_init_one(struct bnxt *bp);
9658
b187e4ba
EP
9659static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9660{
9661#ifdef CONFIG_TEE_BNXT_FW
9662 int rc = tee_bnxt_fw_load();
9663
9664 if (rc)
9665 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9666
9667 return rc;
9668#else
9669 netdev_err(bp->dev, "OP-TEE not supported\n");
9670 return -ENODEV;
9671#endif
9672}
9673
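/* Poll the firmware health status register and retry HWRM_VER_GET while
 * the firmware reports that it is booting or recovering.  If the
 * firmware has crashed with no master function present, request a
 * reset through OP-TEE instead.
 */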
9674static int bnxt_try_recover_fw(struct bnxt *bp)
9675{
9676 if (bp->fw_health && bp->fw_health->status_reliable) {
d1cbd165
MC
9677 int retry = 0, rc;
9678 u32 sts;
9679
9680 mutex_lock(&bp->hwrm_cmd_lock);
9681 do {
d1cbd165 9682 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
861aae78 9683 rc = __bnxt_hwrm_ver_get(bp, true);
17e1be34
MC
9684 if (!BNXT_FW_IS_BOOTING(sts) &&
9685 !BNXT_FW_IS_RECOVERING(sts))
d1cbd165
MC
9686 break;
9687 retry++;
9688 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9689 mutex_unlock(&bp->hwrm_cmd_lock);
b187e4ba 9690
d1cbd165
MC
9691 if (!BNXT_FW_IS_HEALTHY(sts)) {
9692 netdev_err(bp->dev,
9693 "Firmware not responding, status: 0x%x\n",
9694 sts);
9695 rc = -ENODEV;
9696 }
b187e4ba
EP
9697 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9698 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9699 return bnxt_fw_reset_via_optee(bp);
9700 }
d1cbd165 9701 return rc;
b187e4ba
EP
9702 }
9703
9704 return -ENODEV;
9705}
9706
25e1acd6
MC
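/* Notify the firmware that the driver interface is going up or down.
 * On the "up" path the response flags indicate whether resources have
 * changed or a hot firmware reset has completed since the last close;
 * in that case firmware state (and, after a reset, interrupts) is
 * re-initialized before the rest of bnxt_open() proceeds.
 */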
9707static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9708{
9709 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9710 struct hwrm_func_drv_if_change_input req = {0};
20d7d1c5
EP
9711 bool fw_reset = !bp->irq_tbl;
9712 bool resc_reinit = false;
5d06eb5c 9713 int rc, retry = 0;
ec5d31e3 9714 u32 flags = 0;
25e1acd6
MC
9715
9716 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9717 return 0;
9718
9719 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9720 if (up)
9721 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9722 mutex_lock(&bp->hwrm_cmd_lock);
5d06eb5c
VV
9723 while (retry < BNXT_FW_IF_RETRY) {
9724 rc = _hwrm_send_message(bp, &req, sizeof(req),
9725 HWRM_CMD_TIMEOUT);
9726 if (rc != -EAGAIN)
9727 break;
9728
9729 msleep(50);
9730 retry++;
9731 }
ec5d31e3
MC
9732 if (!rc)
9733 flags = le32_to_cpu(resp->flags);
25e1acd6 9734 mutex_unlock(&bp->hwrm_cmd_lock);
5d06eb5c
VV
9735
9736 if (rc == -EAGAIN)
9737 return rc;
b187e4ba
EP
9738 if (rc && up) {
9739 rc = bnxt_try_recover_fw(bp);
9740 fw_reset = true;
9741 }
ec5d31e3
MC
9742 if (rc)
9743 return rc;
25e1acd6 9744
43a440c4
MC
9745 if (!up) {
9746 bnxt_inv_fw_health_reg(bp);
ec5d31e3 9747 return 0;
43a440c4 9748 }
25e1acd6 9749
ec5d31e3
MC
9750 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9751 resc_reinit = true;
9752 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9753 fw_reset = true;
43a440c4
MC
9754 else if (bp->fw_health && !bp->fw_health->status_reliable)
9755 bnxt_try_map_fw_health_reg(bp);
ec5d31e3 9756
3bc7d4a3
MC
9757 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9758 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
20d7d1c5 9759 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
3bc7d4a3
MC
9760 return -ENODEV;
9761 }
ec5d31e3
MC
9762 if (resc_reinit || fw_reset) {
9763 if (fw_reset) {
2924ad95 9764 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
f3a6d206
VV
9765 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9766 bnxt_ulp_stop(bp);
325f85f3
MC
9767 bnxt_free_ctx_mem(bp);
9768 kfree(bp->ctx);
9769 bp->ctx = NULL;
843d699d 9770 bnxt_dcb_free(bp);
ec5d31e3
MC
9771 rc = bnxt_fw_init_one(bp);
9772 if (rc) {
2924ad95 9773 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
ec5d31e3
MC
9774 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9775 return rc;
9776 }
9777 bnxt_clear_int_mode(bp);
9778 rc = bnxt_init_int_mode(bp);
9779 if (rc) {
2924ad95 9780 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
ec5d31e3
MC
9781 netdev_err(bp->dev, "init int mode failed\n");
9782 return rc;
9783 }
ec5d31e3
MC
9784 }
9785 if (BNXT_NEW_RM(bp)) {
9786 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9787
9788 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
15a7deb8
SB
9789 if (rc)
9790 netdev_err(bp->dev, "resc_qcaps failed\n");
9791
ec5d31e3
MC
9792 hw_resc->resv_cp_rings = 0;
9793 hw_resc->resv_stat_ctxs = 0;
9794 hw_resc->resv_irqs = 0;
9795 hw_resc->resv_tx_rings = 0;
9796 hw_resc->resv_rx_rings = 0;
9797 hw_resc->resv_hw_ring_grps = 0;
9798 hw_resc->resv_vnics = 0;
9799 if (!fw_reset) {
9800 bp->tx_nr_rings = 0;
9801 bp->rx_nr_rings = 0;
9802 }
9803 }
25e1acd6 9804 }
15a7deb8 9805 return rc;
25e1acd6
MC
9806}
9807
5ad2cbee
MC
9808static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9809{
9810 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9811 struct hwrm_port_led_qcaps_input req = {0};
9812 struct bnxt_pf_info *pf = &bp->pf;
9813 int rc;
9814
ba642ab7 9815 bp->num_leds = 0;
5ad2cbee
MC
9816 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9817 return 0;
9818
9819 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9820 req.port_id = cpu_to_le16(pf->port_id);
9821 mutex_lock(&bp->hwrm_cmd_lock);
9822 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9823 if (rc) {
9824 mutex_unlock(&bp->hwrm_cmd_lock);
9825 return rc;
9826 }
9827 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9828 int i;
9829
9830 bp->num_leds = resp->num_leds;
9831 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9832 bp->num_leds);
9833 for (i = 0; i < bp->num_leds; i++) {
9834 struct bnxt_led_info *led = &bp->leds[i];
9835 __le16 caps = led->led_state_caps;
9836
9837 if (!led->led_group_id ||
9838 !BNXT_LED_ALT_BLINK_CAP(caps)) {
9839 bp->num_leds = 0;
9840 break;
9841 }
9842 }
9843 }
9844 mutex_unlock(&bp->hwrm_cmd_lock);
9845 return 0;
9846}
9847
5282db6c
MC
9848int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9849{
9850 struct hwrm_wol_filter_alloc_input req = {0};
9851 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9852 int rc;
9853
9854 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9855 req.port_id = cpu_to_le16(bp->pf.port_id);
9856 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9857 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9858 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9859 mutex_lock(&bp->hwrm_cmd_lock);
9860 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9861 if (!rc)
9862 bp->wol_filter_id = resp->wol_filter_id;
9863 mutex_unlock(&bp->hwrm_cmd_lock);
9864 return rc;
9865}
9866
9867int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9868{
9869 struct hwrm_wol_filter_free_input req = {0};
5282db6c
MC
9870
9871 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9872 req.port_id = cpu_to_le16(bp->pf.port_id);
9873 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9874 req.wol_filter_id = bp->wol_filter_id;
9f90445c 9875 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5282db6c
MC
9876}
9877
c1ef146a
MC
9878static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9879{
9880 struct hwrm_wol_filter_qcfg_input req = {0};
9881 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9882 u16 next_handle = 0;
9883 int rc;
9884
9885 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9886 req.port_id = cpu_to_le16(bp->pf.port_id);
9887 req.handle = cpu_to_le16(handle);
9888 mutex_lock(&bp->hwrm_cmd_lock);
9889 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9890 if (!rc) {
9891 next_handle = le16_to_cpu(resp->next_handle);
9892 if (next_handle != 0) {
9893 if (resp->wol_type ==
9894 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9895 bp->wol = 1;
9896 bp->wol_filter_id = resp->wol_filter_id;
9897 }
9898 }
9899 }
9900 mutex_unlock(&bp->hwrm_cmd_lock);
9901 return next_handle;
9902}
9903
9904static void bnxt_get_wol_settings(struct bnxt *bp)
9905{
9906 u16 handle = 0;
9907
ba642ab7 9908 bp->wol = 0;
c1ef146a
MC
9909 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9910 return;
9911
9912 do {
9913 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9914 } while (handle && handle != 0xffff);
9915}
9916
cde49a42
VV
9917#ifdef CONFIG_BNXT_HWMON
9918static ssize_t bnxt_show_temp(struct device *dev,
9919 struct device_attribute *devattr, char *buf)
9920{
9921 struct hwrm_temp_monitor_query_input req = {0};
9922 struct hwrm_temp_monitor_query_output *resp;
9923 struct bnxt *bp = dev_get_drvdata(dev);
12cce90b 9924 u32 len = 0;
d69753fa 9925 int rc;
cde49a42
VV
9926
9927 resp = bp->hwrm_cmd_resp_addr;
9928 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9929 mutex_lock(&bp->hwrm_cmd_lock);
d69753fa
EP
9930 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9931 if (!rc)
12cce90b 9932 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
cde49a42 9933 mutex_unlock(&bp->hwrm_cmd_lock);
27537929
DC
9934 if (rc)
9935 return rc;
9936 return len;
cde49a42
VV
9937}
9938static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9939
9940static struct attribute *bnxt_attrs[] = {
9941 &sensor_dev_attr_temp1_input.dev_attr.attr,
9942 NULL
9943};
9944ATTRIBUTE_GROUPS(bnxt);
9945
9946static void bnxt_hwmon_close(struct bnxt *bp)
9947{
9948 if (bp->hwmon_dev) {
9949 hwmon_device_unregister(bp->hwmon_dev);
9950 bp->hwmon_dev = NULL;
9951 }
9952}
9953
9954static void bnxt_hwmon_open(struct bnxt *bp)
9955{
d69753fa 9956 struct hwrm_temp_monitor_query_input req = {0};
cde49a42 9957 struct pci_dev *pdev = bp->pdev;
d69753fa
EP
9958 int rc;
9959
9960 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9961 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9962 if (rc == -EACCES || rc == -EOPNOTSUPP) {
9963 bnxt_hwmon_close(bp);
9964 return;
9965 }
cde49a42 9966
ba642ab7
MC
9967 if (bp->hwmon_dev)
9968 return;
9969
cde49a42
VV
9970 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9971 DRV_MODULE_NAME, bp,
9972 bnxt_groups);
9973 if (IS_ERR(bp->hwmon_dev)) {
9974 bp->hwmon_dev = NULL;
9975 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9976 }
9977}
9978#else
9979static void bnxt_hwmon_close(struct bnxt *bp)
9980{
9981}
9982
9983static void bnxt_hwmon_open(struct bnxt *bp)
9984{
9985}
9986#endif
9987
939f7f0c
MC
9988static bool bnxt_eee_config_ok(struct bnxt *bp)
9989{
9990 struct ethtool_eee *eee = &bp->eee;
9991 struct bnxt_link_info *link_info = &bp->link_info;
9992
b0d28207 9993 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
939f7f0c
MC
9994 return true;
9995
9996 if (eee->eee_enabled) {
9997 u32 advertising =
9998 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9999
10000 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10001 eee->eee_enabled = 0;
10002 return false;
10003 }
10004 if (eee->advertised & ~advertising) {
10005 eee->advertised = advertising & eee->supported;
10006 return false;
10007 }
10008 }
10009 return true;
10010}
10011
c0c050c5
MC
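/* Compare the requested pause, speed and EEE settings against what the
 * firmware currently reports and reconfigure the PHY only when
 * something has changed (or when the last close brought the link
 * down).  Only a single-function PF is allowed to change PHY settings.
 */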
10012static int bnxt_update_phy_setting(struct bnxt *bp)
10013{
10014 int rc;
10015 bool update_link = false;
10016 bool update_pause = false;
939f7f0c 10017 bool update_eee = false;
c0c050c5
MC
10018 struct bnxt_link_info *link_info = &bp->link_info;
10019
10020 rc = bnxt_update_link(bp, true);
10021 if (rc) {
10022 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10023 rc);
10024 return rc;
10025 }
33dac24a
MC
10026 if (!BNXT_SINGLE_PF(bp))
10027 return 0;
10028
c0c050c5 10029 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
c9ee9516
MC
10030 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10031 link_info->req_flow_ctrl)
c0c050c5
MC
10032 update_pause = true;
10033 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10034 link_info->force_pause_setting != link_info->req_flow_ctrl)
10035 update_pause = true;
c0c050c5
MC
10036 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10037 if (BNXT_AUTO_MODE(link_info->auto_mode))
10038 update_link = true;
d058426e
EP
10039 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10040 link_info->req_link_speed != link_info->force_link_speed)
10041 update_link = true;
10042 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10043 link_info->req_link_speed != link_info->force_pam4_link_speed)
c0c050c5 10044 update_link = true;
de73018f
MC
10045 if (link_info->req_duplex != link_info->duplex_setting)
10046 update_link = true;
c0c050c5
MC
10047 } else {
10048 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10049 update_link = true;
d058426e
EP
10050 if (link_info->advertising != link_info->auto_link_speeds ||
10051 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
c0c050c5 10052 update_link = true;
c0c050c5
MC
10053 }
10054
16d663a6
MC
10055 /* The last close may have shut down the link, so we need to call
10056 * PHY_CFG to bring it back up.
10057 */
83d8f5e9 10058 if (!bp->link_info.link_up)
16d663a6
MC
10059 update_link = true;
10060
939f7f0c
MC
10061 if (!bnxt_eee_config_ok(bp))
10062 update_eee = true;
10063
c0c050c5 10064 if (update_link)
939f7f0c 10065 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
c0c050c5
MC
10066 else if (update_pause)
10067 rc = bnxt_hwrm_set_pause(bp);
10068 if (rc) {
10069 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10070 rc);
10071 return rc;
10072 }
10073
10074 return rc;
10075}
10076
11809490
JH
10077/* Common routine to pre-map certain register block to different GRC window.
10078 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10079 * in the PF and 3 windows in the VF can be customized to map in different
10080 * register blocks.
10081 */
10082static void bnxt_preset_reg_win(struct bnxt *bp)
10083{
10084 if (BNXT_PF(bp)) {
10085 /* CAG registers map to GRC window #4 */
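		/* Window select registers are 4 bytes each, so the "+ 12"
		 * offset below picks the fourth register, i.e. window #4.
		 */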
10086 writel(BNXT_CAG_REG_BASE,
10087 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10088 }
10089}
10090
47558acd
MC
10091static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10092
6882c36c
EP
10093static int bnxt_reinit_after_abort(struct bnxt *bp)
10094{
10095 int rc;
10096
10097 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10098 return -EBUSY;
10099
d20cd745
VV
10100 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10101 return -ENODEV;
10102
6882c36c
EP
10103 rc = bnxt_fw_init_one(bp);
10104 if (!rc) {
10105 bnxt_clear_int_mode(bp);
10106 rc = bnxt_init_int_mode(bp);
10107 if (!rc) {
10108 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10109 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10110 }
10111 }
10112 return rc;
10113}
10114
c0c050c5
MC
10115static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10116{
10117 int rc = 0;
10118
11809490 10119 bnxt_preset_reg_win(bp);
c0c050c5
MC
10120 netif_carrier_off(bp->dev);
10121 if (irq_re_init) {
47558acd
MC
10122 /* Reserve rings now if none were reserved at driver probe. */
10123 rc = bnxt_init_dflt_ring_mode(bp);
10124 if (rc) {
10125 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10126 return rc;
10127 }
c0c050c5 10128 }
1b3f0b75 10129 rc = bnxt_reserve_rings(bp, irq_re_init);
41e8d798
MC
10130 if (rc)
10131 return rc;
c0c050c5
MC
10132 if ((bp->flags & BNXT_FLAG_RFS) &&
10133 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10134 /* disable RFS if falling back to INTA */
10135 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10136 bp->flags &= ~BNXT_FLAG_RFS;
10137 }
10138
10139 rc = bnxt_alloc_mem(bp, irq_re_init);
10140 if (rc) {
10141 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10142 goto open_err_free_mem;
10143 }
10144
10145 if (irq_re_init) {
10146 bnxt_init_napi(bp);
10147 rc = bnxt_request_irq(bp);
10148 if (rc) {
10149 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
c58387ab 10150 goto open_err_irq;
c0c050c5
MC
10151 }
10152 }
10153
c0c050c5
MC
10154 rc = bnxt_init_nic(bp, irq_re_init);
10155 if (rc) {
10156 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
96ecdcc9 10157 goto open_err_irq;
c0c050c5
MC
10158 }
10159
96ecdcc9
JK
10160 bnxt_enable_napi(bp);
10161 bnxt_debug_dev_init(bp);
10162
c0c050c5 10163 if (link_re_init) {
e2dc9b6e 10164 mutex_lock(&bp->link_lock);
c0c050c5 10165 rc = bnxt_update_phy_setting(bp);
e2dc9b6e 10166 mutex_unlock(&bp->link_lock);
a1ef4a79 10167 if (rc) {
ba41d46f 10168 netdev_warn(bp->dev, "failed to update phy settings\n");
a1ef4a79
MC
10169 if (BNXT_SINGLE_PF(bp)) {
10170 bp->link_info.phy_retry = true;
10171 bp->link_info.phy_retry_expires =
10172 jiffies + 5 * HZ;
10173 }
10174 }
c0c050c5
MC
10175 }
10176
7cdd5fc3 10177 if (irq_re_init)
442a35a5 10178 udp_tunnel_nic_reset_ntf(bp->dev);
c0c050c5 10179
caefe526 10180 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
10181 bnxt_enable_int(bp);
10182 /* Enable TX queues */
10183 bnxt_tx_enable(bp);
10184 mod_timer(&bp->timer, jiffies + bp->current_interval);
10289bec
MC
10185 /* Poll link status and check for SFP+ module status */
10186 bnxt_get_port_module_status(bp);
c0c050c5 10187
ee5c7fb3
SP
10188 /* VF-reps may need to be re-opened after the PF is re-opened */
10189 if (BNXT_PF(bp))
10190 bnxt_vf_reps_open(bp);
c0c050c5
MC
10191 return 0;
10192
c58387ab 10193open_err_irq:
c0c050c5
MC
10194 bnxt_del_napi(bp);
10195
10196open_err_free_mem:
10197 bnxt_free_skbs(bp);
10198 bnxt_free_irq(bp);
10199 bnxt_free_mem(bp, true);
10200 return rc;
10201}
10202
10203/* rtnl_lock held */
10204int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10205{
10206 int rc = 0;
10207
a1301f08
MC
10208 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10209 rc = -EIO;
10210 if (!rc)
10211 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
10212 if (rc) {
10213 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10214 dev_close(bp->dev);
10215 }
10216 return rc;
10217}
10218
f7dc1ea6
MC
10219/* rtnl_lock held, open the NIC halfway by allocating all resources, but
10220 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
10221 * self tests.
10222 */
10223int bnxt_half_open_nic(struct bnxt *bp)
10224{
10225 int rc = 0;
10226
11a39259
SK
10227 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10228 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10229 rc = -ENODEV;
10230 goto half_open_err;
10231 }
10232
f7dc1ea6
MC
10233 rc = bnxt_alloc_mem(bp, false);
10234 if (rc) {
10235 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10236 goto half_open_err;
10237 }
10238 rc = bnxt_init_nic(bp, false);
10239 if (rc) {
10240 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10241 goto half_open_err;
10242 }
10243 return 0;
10244
10245half_open_err:
10246 bnxt_free_skbs(bp);
10247 bnxt_free_mem(bp, false);
10248 dev_close(bp->dev);
10249 return rc;
10250}
10251
10252/* rtnl_lock held, this call can only be made after a previous successful
10253 * call to bnxt_half_open_nic().
10254 */
10255void bnxt_half_close_nic(struct bnxt *bp)
10256{
10257 bnxt_hwrm_resource_free(bp, false, false);
10258 bnxt_free_skbs(bp);
10259 bnxt_free_mem(bp, false);
10260}
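/* A minimal usage sketch for the half-open/half-close pair above, assuming
 * the caller holds rtnl_lock; run_offline_selftest() is a hypothetical
 * placeholder for the offline ethtool self-test logic:
 *
 *	rc = bnxt_half_open_nic(bp);
 *	if (!rc) {
 *		run_offline_selftest(bp);
 *		bnxt_half_close_nic(bp);
 *	}
 */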
10261
c16d4ee0
MC
10262static void bnxt_reenable_sriov(struct bnxt *bp)
10263{
10264 if (BNXT_PF(bp)) {
10265 struct bnxt_pf_info *pf = &bp->pf;
10266 int n = pf->active_vfs;
10267
10268 if (n)
10269 bnxt_cfg_hw_sriov(bp, &n, true);
10270 }
10271}
10272
c0c050c5
MC
10273static int bnxt_open(struct net_device *dev)
10274{
10275 struct bnxt *bp = netdev_priv(dev);
25e1acd6 10276 int rc;
c0c050c5 10277
ec5d31e3 10278 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
6882c36c
EP
10279 rc = bnxt_reinit_after_abort(bp);
10280 if (rc) {
10281 if (rc == -EBUSY)
10282 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10283 else
10284 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10285 return -ENODEV;
10286 }
ec5d31e3
MC
10287 }
10288
10289 rc = bnxt_hwrm_if_change(bp, true);
25e1acd6 10290 if (rc)
ec5d31e3 10291 return rc;
d7859afb
MC
10292
10293 if (bnxt_ptp_init(bp)) {
10294 netdev_warn(dev, "PTP initialization failed.\n");
10295 kfree(bp->ptp_cfg);
10296 bp->ptp_cfg = NULL;
10297 }
ec5d31e3
MC
10298 rc = __bnxt_open_nic(bp, true, true);
10299 if (rc) {
25e1acd6 10300 bnxt_hwrm_if_change(bp, false);
d7859afb 10301 bnxt_ptp_clear(bp);
ec5d31e3 10302 } else {
f3a6d206 10303 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
12de2ead 10304 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
f3a6d206 10305 bnxt_ulp_start(bp, 0);
12de2ead
MC
10306 bnxt_reenable_sriov(bp);
10307 }
ec5d31e3
MC
10308 }
10309 bnxt_hwmon_open(bp);
10310 }
cde49a42 10311
25e1acd6 10312 return rc;
c0c050c5
MC
10313}
10314
f9b76ebd
MC
10315static bool bnxt_drv_busy(struct bnxt *bp)
10316{
10317 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10318 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10319}
10320
b8875ca3
MC
10321static void bnxt_get_ring_stats(struct bnxt *bp,
10322 struct rtnl_link_stats64 *stats);
10323
86e953db
MC
10324static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10325 bool link_re_init)
c0c050c5 10326{
ee5c7fb3
SP
10327 /* Close the VF-reps before closing PF */
10328 if (BNXT_PF(bp))
10329 bnxt_vf_reps_close(bp);
86e953db 10330
c0c050c5
MC
10331 /* Change device state to avoid TX queue wake-ups */
10332 bnxt_tx_disable(bp);
10333
caefe526 10334 clear_bit(BNXT_STATE_OPEN, &bp->state);
4cebdcec 10335 smp_mb__after_atomic();
f9b76ebd 10336 while (bnxt_drv_busy(bp))
4cebdcec 10337 msleep(20);
c0c050c5 10338
9d8bc097 10339 /* Flush rings and disable interrupts */
c0c050c5
MC
10340 bnxt_shutdown_nic(bp, irq_re_init);
10341
10342 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10343
cabfb09d 10344 bnxt_debug_dev_exit(bp);
c0c050c5 10345 bnxt_disable_napi(bp);
c0c050c5
MC
10346 del_timer_sync(&bp->timer);
10347 bnxt_free_skbs(bp);
10348
b8875ca3 10349 /* Save ring stats before shutdown */
b8056e84 10350 if (bp->bnapi && irq_re_init)
b8875ca3 10351 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
c0c050c5
MC
10352 if (irq_re_init) {
10353 bnxt_free_irq(bp);
10354 bnxt_del_napi(bp);
10355 }
10356 bnxt_free_mem(bp, irq_re_init);
86e953db
MC
10357}
10358
10359int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10360{
10361 int rc = 0;
10362
3bc7d4a3
MC
10363 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10364 /* If we get here, it means firmware reset is in progress
10365 * while we are trying to close. We can safely proceed with
10366 * the close because we are holding rtnl_lock(). Some firmware
10367 * messages may fail as we proceed to close. We set the
10368 * ABORT_ERR flag here so that the FW reset thread will later
10369 * abort when it gets the rtnl_lock() and sees the flag.
10370 */
10371 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10372 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10373 }
10374
86e953db
MC
10375#ifdef CONFIG_BNXT_SRIOV
10376 if (bp->sriov_cfg) {
10377 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10378 !bp->sriov_cfg,
10379 BNXT_SRIOV_CFG_WAIT_TMO);
10380 if (rc)
10381 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10382 }
10383#endif
10384 __bnxt_close_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
10385 return rc;
10386}
10387
10388static int bnxt_close(struct net_device *dev)
10389{
10390 struct bnxt *bp = netdev_priv(dev);
10391
d7859afb 10392 bnxt_ptp_clear(bp);
cde49a42 10393 bnxt_hwmon_close(bp);
c0c050c5 10394 bnxt_close_nic(bp, true, true);
33f7d55f 10395 bnxt_hwrm_shutdown_link(bp);
25e1acd6 10396 bnxt_hwrm_if_change(bp, false);
c0c050c5
MC
10397 return 0;
10398}
10399
0ca12be9
VV
10400static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10401 u16 *val)
10402{
10403 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
10404 struct hwrm_port_phy_mdio_read_input req = {0};
10405 int rc;
10406
10407 if (bp->hwrm_spec_code < 0x10a00)
10408 return -EOPNOTSUPP;
10409
10410 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
10411 req.port_id = cpu_to_le16(bp->pf.port_id);
10412 req.phy_addr = phy_addr;
10413 req.reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 10414 if (mdio_phy_id_is_c45(phy_addr)) {
0ca12be9
VV
10415 req.cl45_mdio = 1;
10416 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10417 req.dev_addr = mdio_phy_id_devad(phy_addr);
10418 req.reg_addr = cpu_to_le16(reg);
10419 }
10420
10421 mutex_lock(&bp->hwrm_cmd_lock);
10422 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10423 if (!rc)
10424 *val = le16_to_cpu(resp->reg_data);
10425 mutex_unlock(&bp->hwrm_cmd_lock);
10426 return rc;
10427}
10428
10429static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10430 u16 val)
10431{
10432 struct hwrm_port_phy_mdio_write_input req = {0};
10433
10434 if (bp->hwrm_spec_code < 0x10a00)
10435 return -EOPNOTSUPP;
10436
10437 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
10438 req.port_id = cpu_to_le16(bp->pf.port_id);
10439 req.phy_addr = phy_addr;
10440 req.reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 10441 if (mdio_phy_id_is_c45(phy_addr)) {
0ca12be9
VV
10442 req.cl45_mdio = 1;
10443 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10444 req.dev_addr = mdio_phy_id_devad(phy_addr);
10445 req.reg_addr = cpu_to_le16(reg);
10446 }
10447 req.reg_data = cpu_to_le16(val);
10448
10449 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10450}
10451
c0c050c5
MC
10452/* rtnl_lock held */
10453static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10454{
0ca12be9
VV
10455 struct mii_ioctl_data *mdio = if_mii(ifr);
10456 struct bnxt *bp = netdev_priv(dev);
10457 int rc;
10458
c0c050c5
MC
10459 switch (cmd) {
10460 case SIOCGMIIPHY:
0ca12be9
VV
10461 mdio->phy_id = bp->link_info.phy_addr;
10462
df561f66 10463 fallthrough;
c0c050c5 10464 case SIOCGMIIREG: {
0ca12be9
VV
10465 u16 mii_regval = 0;
10466
c0c050c5
MC
10467 if (!netif_running(dev))
10468 return -EAGAIN;
10469
0ca12be9
VV
10470 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10471 &mii_regval);
10472 mdio->val_out = mii_regval;
10473 return rc;
c0c050c5
MC
10474 }
10475
10476 case SIOCSMIIREG:
10477 if (!netif_running(dev))
10478 return -EAGAIN;
10479
0ca12be9
VV
10480 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10481 mdio->val_in);
c0c050c5 10482
118612d5
MC
10483 case SIOCSHWTSTAMP:
10484 return bnxt_hwtstamp_set(dev, ifr);
10485
10486 case SIOCGHWTSTAMP:
10487 return bnxt_hwtstamp_get(dev, ifr);
10488
c0c050c5
MC
10489 default:
10490 /* do nothing */
10491 break;
10492 }
10493 return -EOPNOTSUPP;
10494}
10495
b8875ca3
MC
10496static void bnxt_get_ring_stats(struct bnxt *bp,
10497 struct rtnl_link_stats64 *stats)
c0c050c5 10498{
b8875ca3 10499 int i;
c0c050c5 10500
c0c050c5
MC
10501 for (i = 0; i < bp->cp_nr_rings; i++) {
10502 struct bnxt_napi *bnapi = bp->bnapi[i];
10503 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
a0c30621 10504 u64 *sw = cpr->stats.sw_stats;
c0c050c5 10505
a0c30621
MC
10506 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10507 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10508 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
c0c050c5 10509
a0c30621
MC
10510 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10511 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10512 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
c0c050c5 10513
a0c30621
MC
10514 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10515 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10516 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
c0c050c5 10517
a0c30621
MC
10518 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10519 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10520 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
c0c050c5
MC
10521
10522 stats->rx_missed_errors +=
a0c30621 10523 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
c0c050c5 10524
a0c30621 10525 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
c0c050c5 10526
a0c30621 10527 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
c0c050c5 10528 }
b8875ca3
MC
10529}
10530
10531static void bnxt_add_prev_stats(struct bnxt *bp,
10532 struct rtnl_link_stats64 *stats)
10533{
10534 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10535
10536 stats->rx_packets += prev_stats->rx_packets;
10537 stats->tx_packets += prev_stats->tx_packets;
10538 stats->rx_bytes += prev_stats->rx_bytes;
10539 stats->tx_bytes += prev_stats->tx_bytes;
10540 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10541 stats->multicast += prev_stats->multicast;
10542 stats->tx_dropped += prev_stats->tx_dropped;
10543}
10544
10545static void
10546bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10547{
10548 struct bnxt *bp = netdev_priv(dev);
10549
10550 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10551 /* Make sure bnxt_close_nic() sees that we are reading stats before
10552 * we check the BNXT_STATE_OPEN flag.
10553 */
10554 smp_mb__after_atomic();
10555 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10556 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10557 *stats = bp->net_stats_prev;
10558 return;
10559 }
10560
10561 bnxt_get_ring_stats(bp, stats);
10562 bnxt_add_prev_stats(bp, stats);
c0c050c5 10563
9947f83f 10564 if (bp->flags & BNXT_FLAG_PORT_STATS) {
a0c30621
MC
10565 u64 *rx = bp->port_stats.sw_stats;
10566 u64 *tx = bp->port_stats.sw_stats +
10567 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10568
10569 stats->rx_crc_errors =
10570 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10571 stats->rx_frame_errors =
10572 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10573 stats->rx_length_errors =
10574 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10575 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10576 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10577 stats->rx_errors =
10578 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10579 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10580 stats->collisions =
10581 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10582 stats->tx_fifo_errors =
10583 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10584 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
9947f83f 10585 }
f9b76ebd 10586 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
c0c050c5
MC
10587}
10588
10589static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10590{
10591 struct net_device *dev = bp->dev;
10592 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10593 struct netdev_hw_addr *ha;
10594 u8 *haddr;
10595 int mc_count = 0;
10596 bool update = false;
10597 int off = 0;
10598
10599 netdev_for_each_mc_addr(ha, dev) {
10600 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10601 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10602 vnic->mc_list_count = 0;
10603 return false;
10604 }
10605 haddr = ha->addr;
10606 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10607 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10608 update = true;
10609 }
10610 off += ETH_ALEN;
10611 mc_count++;
10612 }
10613 if (mc_count)
10614 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10615
10616 if (mc_count != vnic->mc_list_count) {
10617 vnic->mc_list_count = mc_count;
10618 update = true;
10619 }
10620 return update;
10621}
10622
10623static bool bnxt_uc_list_updated(struct bnxt *bp)
10624{
10625 struct net_device *dev = bp->dev;
10626 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10627 struct netdev_hw_addr *ha;
10628 int off = 0;
10629
10630 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10631 return true;
10632
10633 netdev_for_each_uc_addr(ha, dev) {
10634 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10635 return true;
10636
10637 off += ETH_ALEN;
10638 }
10639 return false;
10640}
10641
10642static void bnxt_set_rx_mode(struct net_device *dev)
10643{
10644 struct bnxt *bp = netdev_priv(dev);
268d0895 10645 struct bnxt_vnic_info *vnic;
c0c050c5
MC
10646 bool mc_update = false;
10647 bool uc_update;
268d0895 10648 u32 mask;
c0c050c5 10649
268d0895 10650 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
10651 return;
10652
268d0895
MC
10653 vnic = &bp->vnic_info[0];
10654 mask = vnic->rx_mask;
c0c050c5
MC
10655 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10656 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
30e33848
MC
10657 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10658 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
c0c050c5 10659
dd85fc0a 10660 if (dev->flags & IFF_PROMISC)
c0c050c5
MC
10661 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10662
10663 uc_update = bnxt_uc_list_updated(bp);
10664
30e33848
MC
10665 if (dev->flags & IFF_BROADCAST)
10666 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5
MC
10667 if (dev->flags & IFF_ALLMULTI) {
10668 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10669 vnic->mc_list_count = 0;
10670 } else {
10671 mc_update = bnxt_mc_list_updated(bp, &mask);
10672 }
10673
10674 if (mask != vnic->rx_mask || uc_update || mc_update) {
10675 vnic->rx_mask = mask;
10676
10677 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
c213eae8 10678 bnxt_queue_sp_work(bp);
c0c050c5
MC
10679 }
10680}
10681
b664f008 10682static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
10683{
10684 struct net_device *dev = bp->dev;
10685 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10686 struct netdev_hw_addr *ha;
10687 int i, off = 0, rc;
10688 bool uc_update;
10689
10690 netif_addr_lock_bh(dev);
10691 uc_update = bnxt_uc_list_updated(bp);
10692 netif_addr_unlock_bh(dev);
10693
10694 if (!uc_update)
10695 goto skip_uc;
10696
10697 mutex_lock(&bp->hwrm_cmd_lock);
10698 for (i = 1; i < vnic->uc_filter_count; i++) {
10699 struct hwrm_cfa_l2_filter_free_input req = {0};
10700
10701 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10702 -1);
10703
10704 req.l2_filter_id = vnic->fw_l2_filter_id[i];
10705
10706 rc = _hwrm_send_message(bp, &req, sizeof(req),
10707 HWRM_CMD_TIMEOUT);
10708 }
10709 mutex_unlock(&bp->hwrm_cmd_lock);
10710
10711 vnic->uc_filter_count = 1;
10712
10713 netif_addr_lock_bh(dev);
10714 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10715 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10716 } else {
10717 netdev_for_each_uc_addr(ha, dev) {
10718 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10719 off += ETH_ALEN;
10720 vnic->uc_filter_count++;
10721 }
10722 }
10723 netif_addr_unlock_bh(dev);
10724
10725 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10726 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10727 if (rc) {
10728 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10729 rc);
10730 vnic->uc_filter_count = i;
b664f008 10731 return rc;
c0c050c5
MC
10732 }
10733 }
10734
10735skip_uc:
dd85fc0a
EP
10736 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10737 !bnxt_promisc_ok(bp))
10738 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
c0c050c5 10739 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
b4e30e8e
MC
10740 if (rc && vnic->mc_list_count) {
10741 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10742 rc);
10743 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10744 vnic->mc_list_count = 0;
10745 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10746 }
c0c050c5 10747 if (rc)
b4e30e8e 10748 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
c0c050c5 10749 rc);
b664f008
MC
10750
10751 return rc;
c0c050c5
MC
10752}
10753
2773dfb2
MC
10754static bool bnxt_can_reserve_rings(struct bnxt *bp)
10755{
10756#ifdef CONFIG_BNXT_SRIOV
f1ca94de 10757 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
2773dfb2
MC
10758 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10759
10760 /* No minimum rings were provisioned by the PF. Don't
10761 * reserve rings by default when device is down.
10762 */
10763 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10764 return true;
10765
10766 if (!netif_running(bp->dev))
10767 return false;
10768 }
10769#endif
10770 return true;
10771}
10772
8079e8f1
MC
10773/* If the chip and firmware support RFS */
10774static bool bnxt_rfs_supported(struct bnxt *bp)
10775{
e969ae5b 10776 if (bp->flags & BNXT_FLAG_CHIP_P5) {
41136ab3 10777 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
e969ae5b 10778 return true;
41e8d798 10779 return false;
e969ae5b 10780 }
8079e8f1
MC
10781 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10782 return true;
ae10ae74
MC
10783 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10784 return true;
8079e8f1
MC
10785 return false;
10786}
10787
10788/* If runtime conditions support RFS */
2bcfa6f6
MC
10789static bool bnxt_rfs_capable(struct bnxt *bp)
10790{
10791#ifdef CONFIG_RFS_ACCEL
8079e8f1 10792 int vnics, max_vnics, max_rss_ctxs;
2bcfa6f6 10793
41e8d798 10794 if (bp->flags & BNXT_FLAG_CHIP_P5)
ac33906c 10795 return bnxt_rfs_supported(bp);
2773dfb2 10796 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
2bcfa6f6
MC
10797 return false;
10798
10799 vnics = 1 + bp->rx_nr_rings;
8079e8f1
MC
10800 max_vnics = bnxt_get_max_func_vnics(bp);
10801 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
ae10ae74
MC
10802
10803 /* RSS contexts not a limiting factor */
10804 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10805 max_rss_ctxs = max_vnics;
8079e8f1 10806 if (vnics > max_vnics || vnics > max_rss_ctxs) {
6a1eef5b
MC
10807 if (bp->rx_nr_rings > 1)
10808 netdev_warn(bp->dev,
10809 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10810 min(max_rss_ctxs - 1, max_vnics - 1));
2bcfa6f6 10811 return false;
a2304909 10812 }
2bcfa6f6 10813
f1ca94de 10814 if (!BNXT_NEW_RM(bp))
6a1eef5b
MC
10815 return true;
10816
10817 if (vnics == bp->hw_resc.resv_vnics)
10818 return true;
10819
780baad4 10820 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
6a1eef5b
MC
10821 if (vnics <= bp->hw_resc.resv_vnics)
10822 return true;
10823
10824 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
780baad4 10825 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
6a1eef5b 10826 return false;
2bcfa6f6
MC
10827#else
10828 return false;
10829#endif
10830}
10831
c0c050c5
MC
10832static netdev_features_t bnxt_fix_features(struct net_device *dev,
10833 netdev_features_t features)
10834{
2bcfa6f6 10835 struct bnxt *bp = netdev_priv(dev);
c72cb303 10836 netdev_features_t vlan_features;
2bcfa6f6 10837
a2304909 10838 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
2bcfa6f6 10839 features &= ~NETIF_F_NTUPLE;
5a9f6b23 10840
1054aee8
MC
10841 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10842 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10843
10844 if (!(features & NETIF_F_GRO))
10845 features &= ~NETIF_F_GRO_HW;
10846
10847 if (features & NETIF_F_GRO_HW)
10848 features &= ~NETIF_F_LRO;
10849
5a9f6b23
MC
10850 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10851 * turned on or off together.
10852 */
a196e96b
EP
10853 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10854 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10855 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10856 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
c72cb303 10857 else if (vlan_features)
a196e96b 10858 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
5a9f6b23 10859 }
cf6645f8 10860#ifdef CONFIG_BNXT_SRIOV
a196e96b
EP
10861 if (BNXT_VF(bp) && bp->vf.vlan)
10862 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
cf6645f8 10863#endif
c0c050c5
MC
10864 return features;
10865}
10866
10867static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10868{
10869 struct bnxt *bp = netdev_priv(dev);
10870 u32 flags = bp->flags;
10871 u32 changes;
10872 int rc = 0;
10873 bool re_init = false;
10874 bool update_tpa = false;
10875
10876 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
1054aee8 10877 if (features & NETIF_F_GRO_HW)
c0c050c5 10878 flags |= BNXT_FLAG_GRO;
1054aee8 10879 else if (features & NETIF_F_LRO)
c0c050c5
MC
10880 flags |= BNXT_FLAG_LRO;
10881
bdbd1eb5
MC
10882 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10883 flags &= ~BNXT_FLAG_TPA;
10884
a196e96b 10885 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
c0c050c5
MC
10886 flags |= BNXT_FLAG_STRIP_VLAN;
10887
10888 if (features & NETIF_F_NTUPLE)
10889 flags |= BNXT_FLAG_RFS;
10890
10891 changes = flags ^ bp->flags;
10892 if (changes & BNXT_FLAG_TPA) {
10893 update_tpa = true;
10894 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
f45b7b78
MC
10895 (flags & BNXT_FLAG_TPA) == 0 ||
10896 (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
10897 re_init = true;
10898 }
10899
10900 if (changes & ~BNXT_FLAG_TPA)
10901 re_init = true;
10902
10903 if (flags != bp->flags) {
10904 u32 old_flags = bp->flags;
10905
2bcfa6f6 10906 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
f45b7b78 10907 bp->flags = flags;
c0c050c5
MC
10908 if (update_tpa)
10909 bnxt_set_ring_params(bp);
10910 return rc;
10911 }
10912
10913 if (re_init) {
10914 bnxt_close_nic(bp, false, false);
f45b7b78 10915 bp->flags = flags;
c0c050c5
MC
10916 if (update_tpa)
10917 bnxt_set_ring_params(bp);
10918
10919 return bnxt_open_nic(bp, false, false);
10920 }
10921 if (update_tpa) {
f45b7b78 10922 bp->flags = flags;
c0c050c5
MC
10923 rc = bnxt_set_tpa(bp,
10924 (flags & BNXT_FLAG_TPA) ?
10925 true : false);
10926 if (rc)
10927 bp->flags = old_flags;
10928 }
10929 }
10930 return rc;
10931}
10932
aa473d6c
MC
10933static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
10934 u8 **nextp)
10935{
10936 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
10937 int hdr_count = 0;
10938 u8 *nexthdr;
10939 int start;
10940
10941 /* Check that there are at most 2 IPv6 extension headers, no
10942 * fragment header, and each is <= 64 bytes.
10943 */
10944 start = nw_off + sizeof(*ip6h);
10945 nexthdr = &ip6h->nexthdr;
10946 while (ipv6_ext_hdr(*nexthdr)) {
10947 struct ipv6_opt_hdr *hp;
10948 int hdrlen;
10949
10950 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
10951 *nexthdr == NEXTHDR_FRAGMENT)
10952 return false;
10953 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
10954 skb_headlen(skb), NULL);
10955 if (!hp)
10956 return false;
10957 if (*nexthdr == NEXTHDR_AUTH)
10958 hdrlen = ipv6_authlen(hp);
10959 else
10960 hdrlen = ipv6_optlen(hp);
10961
10962 if (hdrlen > 64)
10963 return false;
10964 nexthdr = &hp->nexthdr;
10965 start += hdrlen;
10966 hdr_count++;
10967 }
10968 if (nextp) {
10969 /* Caller will check inner protocol */
10970 if (skb->encapsulation) {
10971 *nextp = nexthdr;
10972 return true;
10973 }
10974 *nextp = NULL;
10975 }
10976 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
10977 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
10978}
10979
10980/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
10981static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
10982{
10983 struct udphdr *uh = udp_hdr(skb);
10984 __be16 udp_port = uh->dest;
10985
10986 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
10987 return false;
10988 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
10989 struct ethhdr *eh = inner_eth_hdr(skb);
10990
10991 switch (eh->h_proto) {
10992 case htons(ETH_P_IP):
10993 return true;
10994 case htons(ETH_P_IPV6):
10995 return bnxt_exthdr_check(bp, skb,
10996 skb_inner_network_offset(skb),
10997 NULL);
10998 }
10999 }
11000 return false;
11001}
11002
11003static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11004{
11005 switch (l4_proto) {
11006 case IPPROTO_UDP:
11007 return bnxt_udp_tunl_check(bp, skb);
11008 case IPPROTO_IPIP:
11009 return true;
11010 case IPPROTO_GRE: {
11011 switch (skb->inner_protocol) {
11012 default:
11013 return false;
11014 case htons(ETH_P_IP):
11015 return true;
11016 case htons(ETH_P_IPV6):
11017 fallthrough;
11018 }
11019 }
11020 case IPPROTO_IPV6:
11021 /* Check ext headers of inner ipv6 */
11022 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11023 NULL);
11024 }
11025 return false;
11026}
11027
1698d600
MC
11028static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11029 struct net_device *dev,
11030 netdev_features_t features)
11031{
aa473d6c
MC
11032 struct bnxt *bp = netdev_priv(dev);
11033 u8 *l4_proto;
1698d600
MC
11034
11035 features = vlan_features_check(skb, features);
1698d600
MC
11036 switch (vlan_get_protocol(skb)) {
11037 case htons(ETH_P_IP):
aa473d6c
MC
11038 if (!skb->encapsulation)
11039 return features;
11040 l4_proto = &ip_hdr(skb)->protocol;
11041 if (bnxt_tunl_check(bp, skb, *l4_proto))
11042 return features;
1698d600
MC
11043 break;
11044 case htons(ETH_P_IPV6):
aa473d6c
MC
11045 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11046 &l4_proto))
11047 break;
11048 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11049 return features;
1698d600 11050 break;
1698d600 11051 }
1698d600
MC
11052 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11053}
11054
b5d600b0
VV
11055int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11056 u32 *reg_buf)
11057{
11058 struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
11059 struct hwrm_dbg_read_direct_input req = {0};
11060 __le32 *dbg_reg_buf;
11061 dma_addr_t mapping;
11062 int rc, i;
11063
11064 dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
11065 &mapping, GFP_KERNEL);
11066 if (!dbg_reg_buf)
11067 return -ENOMEM;
11068 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
11069 req.host_dest_addr = cpu_to_le64(mapping);
11070 req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11071 req.read_len32 = cpu_to_le32(num_words);
11072 mutex_lock(&bp->hwrm_cmd_lock);
11073 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11074 if (rc || resp->error_code) {
11075 rc = -EIO;
11076 goto dbg_rd_reg_exit;
11077 }
11078 for (i = 0; i < num_words; i++)
11079 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11080
11081dbg_rd_reg_exit:
11082 mutex_unlock(&bp->hwrm_cmd_lock);
11083 dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
11084 return rc;
11085}
11086
ffd77621
MC
11087static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11088 u32 ring_id, u32 *prod, u32 *cons)
11089{
11090 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
11091 struct hwrm_dbg_ring_info_get_input req = {0};
11092 int rc;
11093
11094 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
11095 req.ring_type = ring_type;
11096 req.fw_ring_id = cpu_to_le32(ring_id);
11097 mutex_lock(&bp->hwrm_cmd_lock);
11098 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11099 if (!rc) {
11100 *prod = le32_to_cpu(resp->producer_index);
11101 *cons = le32_to_cpu(resp->consumer_index);
11102 }
11103 mutex_unlock(&bp->hwrm_cmd_lock);
11104 return rc;
11105}
11106
9f554590
MC
11107static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11108{
b6ab4b01 11109 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9f554590
MC
11110 int i = bnapi->index;
11111
3b2b7d9d
MC
11112 if (!txr)
11113 return;
11114
9f554590
MC
11115 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11116 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11117 txr->tx_cons);
11118}
11119
11120static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11121{
b6ab4b01 11122 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9f554590
MC
11123 int i = bnapi->index;
11124
3b2b7d9d
MC
11125 if (!rxr)
11126 return;
11127
9f554590
MC
11128 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11129 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11130 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11131 rxr->rx_sw_agg_prod);
11132}
11133
11134static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11135{
11136 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11137 int i = bnapi->index;
11138
11139 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11140 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11141}
11142
c0c050c5
MC
11143static void bnxt_dbg_dump_states(struct bnxt *bp)
11144{
11145 int i;
11146 struct bnxt_napi *bnapi;
c0c050c5
MC
11147
11148 for (i = 0; i < bp->cp_nr_rings; i++) {
11149 bnapi = bp->bnapi[i];
c0c050c5 11150 if (netif_msg_drv(bp)) {
9f554590
MC
11151 bnxt_dump_tx_sw_state(bnapi);
11152 bnxt_dump_rx_sw_state(bnapi);
11153 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
11154 }
11155 }
11156}
11157
8fbf58e1
MC
11158static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11159{
11160 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11161 struct hwrm_ring_reset_input req = {0};
11162 struct bnxt_napi *bnapi = rxr->bnapi;
11163 struct bnxt_cp_ring_info *cpr;
11164 u16 cp_ring_id;
11165
11166 cpr = &bnapi->cp_ring;
11167 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11168 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
11169 req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11170 req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11171 return hwrm_send_message_silent(bp, &req, sizeof(req),
11172 HWRM_CMD_TIMEOUT);
11173}
11174
6988bd92 11175static void bnxt_reset_task(struct bnxt *bp, bool silent)
c0c050c5 11176{
6988bd92
MC
11177 if (!silent)
11178 bnxt_dbg_dump_states(bp);
028de140 11179 if (netif_running(bp->dev)) {
b386cd36
MC
11180 int rc;
11181
aa46dfff
VV
11182 if (silent) {
11183 bnxt_close_nic(bp, false, false);
11184 bnxt_open_nic(bp, false, false);
11185 } else {
b386cd36 11186 bnxt_ulp_stop(bp);
aa46dfff
VV
11187 bnxt_close_nic(bp, true, false);
11188 rc = bnxt_open_nic(bp, true, false);
11189 bnxt_ulp_start(bp, rc);
11190 }
028de140 11191 }
c0c050c5
MC
11192}
11193
0290bd29 11194static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
c0c050c5
MC
11195{
11196 struct bnxt *bp = netdev_priv(dev);
11197
11198 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11199 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 11200 bnxt_queue_sp_work(bp);
c0c050c5
MC
11201}
11202
acfb50e4
VV
11203static void bnxt_fw_health_check(struct bnxt *bp)
11204{
11205 struct bnxt_fw_health *fw_health = bp->fw_health;
11206 u32 val;
11207
0797c10d 11208 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
acfb50e4
VV
11209 return;
11210
11211 if (fw_health->tmr_counter) {
11212 fw_health->tmr_counter--;
11213 return;
11214 }
11215
11216 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11217 if (val == fw_health->last_fw_heartbeat)
11218 goto fw_reset;
11219
11220 fw_health->last_fw_heartbeat = val;
11221
11222 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11223 if (val != fw_health->last_fw_reset_cnt)
11224 goto fw_reset;
11225
11226 fw_health->tmr_counter = fw_health->tmr_multiplier;
11227 return;
11228
11229fw_reset:
11230 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11231 bnxt_queue_sp_work(bp);
11232}
11233
e99e88a9 11234static void bnxt_timer(struct timer_list *t)
c0c050c5 11235{
e99e88a9 11236 struct bnxt *bp = from_timer(bp, t, timer);
c0c050c5
MC
11237 struct net_device *dev = bp->dev;
11238
e0009404 11239 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
11240 return;
11241
11242 if (atomic_read(&bp->intr_sem) != 0)
11243 goto bnxt_restart_timer;
11244
acfb50e4
VV
11245 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11246 bnxt_fw_health_check(bp);
11247
fea6b333 11248 if (bp->link_info.link_up && bp->stats_coal_ticks) {
3bdf56c4 11249 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
c213eae8 11250 bnxt_queue_sp_work(bp);
3bdf56c4 11251 }
5a84acbe
SP
11252
11253 if (bnxt_tc_flower_enabled(bp)) {
11254 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11255 bnxt_queue_sp_work(bp);
11256 }
a1ef4a79 11257
87d67f59
PC
11258#ifdef CONFIG_RFS_ACCEL
11259 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11260 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11261 bnxt_queue_sp_work(bp);
11262 }
11263#endif /* CONFIG_RFS_ACCEL */
11264
a1ef4a79
MC
11265 if (bp->link_info.phy_retry) {
11266 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
acda6180 11267 bp->link_info.phy_retry = false;
a1ef4a79
MC
11268 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11269 } else {
11270 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11271 bnxt_queue_sp_work(bp);
11272 }
11273 }
ffd77621 11274
5313845f
MC
11275 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11276 netif_carrier_ok(dev)) {
ffd77621
MC
11277 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11278 bnxt_queue_sp_work(bp);
11279 }
c0c050c5
MC
11280bnxt_restart_timer:
11281 mod_timer(&bp->timer, jiffies + bp->current_interval);
11282}
11283
a551ee94 11284static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6988bd92 11285{
a551ee94
MC
11286 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11287 * set. If the device is being closed, bnxt_close() may be holding
6988bd92
MC
11288 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11289 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
11290 */
11291 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11292 rtnl_lock();
a551ee94
MC
11293}
11294
11295static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11296{
6988bd92
MC
11297 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11298 rtnl_unlock();
11299}
11300
a551ee94
MC
11301/* Only called from bnxt_sp_task() */
11302static void bnxt_reset(struct bnxt *bp, bool silent)
11303{
11304 bnxt_rtnl_lock_sp(bp);
11305 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11306 bnxt_reset_task(bp, silent);
11307 bnxt_rtnl_unlock_sp(bp);
11308}
11309
8fbf58e1
MC
11310/* Only called from bnxt_sp_task() */
11311static void bnxt_rx_ring_reset(struct bnxt *bp)
11312{
11313 int i;
11314
11315 bnxt_rtnl_lock_sp(bp);
11316 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11317 bnxt_rtnl_unlock_sp(bp);
11318 return;
11319 }
11320 /* Disable and flush TPA before resetting the RX ring */
11321 if (bp->flags & BNXT_FLAG_TPA)
11322 bnxt_set_tpa(bp, false);
11323 for (i = 0; i < bp->rx_nr_rings; i++) {
11324 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11325 struct bnxt_cp_ring_info *cpr;
11326 int rc;
11327
11328 if (!rxr->bnapi->in_reset)
11329 continue;
11330
11331 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11332 if (rc) {
11333 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11334 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11335 else
11336 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11337 rc);
8fb35cd3 11338 bnxt_reset_task(bp, true);
8fbf58e1
MC
11339 break;
11340 }
11341 bnxt_free_one_rx_ring_skbs(bp, i);
11342 rxr->rx_prod = 0;
11343 rxr->rx_agg_prod = 0;
11344 rxr->rx_sw_agg_prod = 0;
11345 rxr->rx_next_cons = 0;
11346 rxr->bnapi->in_reset = false;
11347 bnxt_alloc_one_rx_ring(bp, i);
11348 cpr = &rxr->bnapi->cp_ring;
8a27d4b9 11349 cpr->sw_stats.rx.rx_resets++;
8fbf58e1
MC
11350 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11351 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11352 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11353 }
11354 if (bp->flags & BNXT_FLAG_TPA)
11355 bnxt_set_tpa(bp, true);
11356 bnxt_rtnl_unlock_sp(bp);
11357}
11358
230d1f0d
MC
11359static void bnxt_fw_reset_close(struct bnxt *bp)
11360{
f3a6d206 11361 bnxt_ulp_stop(bp);
4f036b2e
MC
11362 /* When firmware is in fatal state, quiesce device and disable
11363 * bus master to prevent any potential bad DMAs before freeing
11364 * kernel memory.
d4073028 11365 */
4f036b2e 11366 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
dab62e7c
MC
11367 u16 val = 0;
11368
11369 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11370 if (val == 0xffff)
11371 bp->fw_reset_min_dsecs = 0;
4f036b2e
MC
11372 bnxt_tx_disable(bp);
11373 bnxt_disable_napi(bp);
11374 bnxt_disable_int_sync(bp);
11375 bnxt_free_irq(bp);
11376 bnxt_clear_int_mode(bp);
d4073028 11377 pci_disable_device(bp->pdev);
4f036b2e 11378 }
d7859afb 11379 bnxt_ptp_clear(bp);
230d1f0d 11380 __bnxt_close_nic(bp, true, false);
ac797ced 11381 bnxt_vf_reps_free(bp);
230d1f0d
MC
11382 bnxt_clear_int_mode(bp);
11383 bnxt_hwrm_func_drv_unrgtr(bp);
d4073028
VV
11384 if (pci_is_enabled(bp->pdev))
11385 pci_disable_device(bp->pdev);
230d1f0d
MC
11386 bnxt_free_ctx_mem(bp);
11387 kfree(bp->ctx);
11388 bp->ctx = NULL;
11389}
11390
acfb50e4
VV
11391static bool is_bnxt_fw_ok(struct bnxt *bp)
11392{
11393 struct bnxt_fw_health *fw_health = bp->fw_health;
11394 bool no_heartbeat = false, has_reset = false;
11395 u32 val;
11396
11397 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11398 if (val == fw_health->last_fw_heartbeat)
11399 no_heartbeat = true;
11400
11401 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11402 if (val != fw_health->last_fw_reset_cnt)
11403 has_reset = true;
11404
11405 if (!no_heartbeat && has_reset)
11406 return true;
11407
11408 return false;
11409}
11410
d1db9e16
MC
11411/* rtnl_lock is acquired before calling this function */
11412static void bnxt_force_fw_reset(struct bnxt *bp)
11413{
11414 struct bnxt_fw_health *fw_health = bp->fw_health;
11415 u32 wait_dsecs;
11416
11417 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11418 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11419 return;
11420
11421 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11422 bnxt_fw_reset_close(bp);
11423 wait_dsecs = fw_health->master_func_wait_dsecs;
11424 if (fw_health->master) {
11425 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11426 wait_dsecs = 0;
11427 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11428 } else {
11429 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11430 wait_dsecs = fw_health->normal_func_wait_dsecs;
11431 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11432 }
4037eb71
VV
11433
11434 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
d1db9e16
MC
11435 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11436 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11437}
11438
11439void bnxt_fw_exception(struct bnxt *bp)
11440{
a2b31e27 11441 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
d1db9e16
MC
11442 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11443 bnxt_rtnl_lock_sp(bp);
11444 bnxt_force_fw_reset(bp);
11445 bnxt_rtnl_unlock_sp(bp);
11446}
11447
e72cb7d6
MC
11448/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11449 * < 0 on error.
11450 */
11451static int bnxt_get_registered_vfs(struct bnxt *bp)
230d1f0d 11452{
e72cb7d6 11453#ifdef CONFIG_BNXT_SRIOV
230d1f0d
MC
11454 int rc;
11455
e72cb7d6
MC
11456 if (!BNXT_PF(bp))
11457 return 0;
11458
11459 rc = bnxt_hwrm_func_qcfg(bp);
11460 if (rc) {
11461 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11462 return rc;
11463 }
11464 if (bp->pf.registered_vfs)
11465 return bp->pf.registered_vfs;
11466 if (bp->sriov_cfg)
11467 return 1;
11468#endif
11469 return 0;
11470}
11471
11472void bnxt_fw_reset(struct bnxt *bp)
11473{
230d1f0d
MC
11474 bnxt_rtnl_lock_sp(bp);
11475 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11476 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
4037eb71 11477 int n = 0, tmo;
e72cb7d6 11478
230d1f0d 11479 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
e72cb7d6
MC
11480 if (bp->pf.active_vfs &&
11481 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11482 n = bnxt_get_registered_vfs(bp);
11483 if (n < 0) {
11484 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11485 n);
11486 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11487 dev_close(bp->dev);
11488 goto fw_reset_exit;
11489 } else if (n > 0) {
11490 u16 vf_tmo_dsecs = n * 10;
11491
11492 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11493 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11494 bp->fw_reset_state =
11495 BNXT_FW_RESET_STATE_POLL_VF;
11496 bnxt_queue_fw_reset_work(bp, HZ / 10);
11497 goto fw_reset_exit;
230d1f0d
MC
11498 }
11499 bnxt_fw_reset_close(bp);
4037eb71
VV
11500 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11501 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11502 tmo = HZ / 10;
11503 } else {
11504 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11505 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11506 }
11507 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d
MC
11508 }
11509fw_reset_exit:
11510 bnxt_rtnl_unlock_sp(bp);
11511}
11512
ffd77621
MC
11513static void bnxt_chk_missed_irq(struct bnxt *bp)
11514{
11515 int i;
11516
11517 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11518 return;
11519
11520 for (i = 0; i < bp->cp_nr_rings; i++) {
11521 struct bnxt_napi *bnapi = bp->bnapi[i];
11522 struct bnxt_cp_ring_info *cpr;
11523 u32 fw_ring_id;
11524 int j;
11525
11526 if (!bnapi)
11527 continue;
11528
11529 cpr = &bnapi->cp_ring;
11530 for (j = 0; j < 2; j++) {
11531 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11532 u32 val[2];
11533
11534 if (!cpr2 || cpr2->has_more_work ||
11535 !bnxt_has_work(bp, cpr2))
11536 continue;
11537
11538 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11539 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11540 continue;
11541 }
11542 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11543 bnxt_dbg_hwrm_ring_info_get(bp,
11544 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11545 fw_ring_id, &val[0], &val[1]);
9d8b5f05 11546 cpr->sw_stats.cmn.missed_irqs++;
ffd77621
MC
11547 }
11548 }
11549}
11550
c0c050c5
MC
11551static void bnxt_cfg_ntp_filters(struct bnxt *);
11552
8119e49b
MC
11553static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11554{
11555 struct bnxt_link_info *link_info = &bp->link_info;
11556
11557 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11558 link_info->autoneg = BNXT_AUTONEG_SPEED;
11559 if (bp->hwrm_spec_code >= 0x10201) {
11560 if (link_info->auto_pause_setting &
11561 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11562 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11563 } else {
11564 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11565 }
11566 link_info->advertising = link_info->auto_link_speeds;
d058426e 11567 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
8119e49b
MC
11568 } else {
11569 link_info->req_link_speed = link_info->force_link_speed;
d058426e
EP
11570 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11571 if (link_info->force_pam4_link_speed) {
11572 link_info->req_link_speed =
11573 link_info->force_pam4_link_speed;
11574 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11575 }
8119e49b
MC
11576 link_info->req_duplex = link_info->duplex_setting;
11577 }
11578 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11579 link_info->req_flow_ctrl =
11580 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11581 else
11582 link_info->req_flow_ctrl = link_info->force_pause_setting;
11583}
11584
df97b34d
MC
11585static void bnxt_fw_echo_reply(struct bnxt *bp)
11586{
11587 struct bnxt_fw_health *fw_health = bp->fw_health;
11588 struct hwrm_func_echo_response_input req = {0};
11589
11590 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
11591 req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11592 req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11593 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11594}
11595
c0c050c5
MC
11596static void bnxt_sp_task(struct work_struct *work)
11597{
11598 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
c0c050c5 11599
4cebdcec
MC
11600 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11601 smp_mb__after_atomic();
11602 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11603 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5 11604 return;
4cebdcec 11605 }
c0c050c5
MC
11606
11607 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11608 bnxt_cfg_rx_mode(bp);
11609
11610 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11611 bnxt_cfg_ntp_filters(bp);
c0c050c5
MC
11612 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11613 bnxt_hwrm_exec_fwd_req(bp);
00db3cba 11614 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
531d1d26
MC
11615 bnxt_hwrm_port_qstats(bp, 0);
11616 bnxt_hwrm_port_qstats_ext(bp, 0);
fea6b333 11617 bnxt_accumulate_all_stats(bp);
00db3cba 11618 }
3bdf56c4 11619
0eaa24b9 11620 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
e2dc9b6e 11621 int rc;
0eaa24b9 11622
e2dc9b6e 11623 mutex_lock(&bp->link_lock);
0eaa24b9
MC
11624 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11625 &bp->sp_event))
11626 bnxt_hwrm_phy_qcaps(bp);
11627
e2dc9b6e 11628 rc = bnxt_update_link(bp, true);
0eaa24b9
MC
11629 if (rc)
11630 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11631 rc);
ca0c7538
VV
11632
11633 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11634 &bp->sp_event))
11635 bnxt_init_ethtool_link_settings(bp);
11636 mutex_unlock(&bp->link_lock);
0eaa24b9 11637 }
a1ef4a79
MC
11638 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11639 int rc;
11640
11641 mutex_lock(&bp->link_lock);
11642 rc = bnxt_update_phy_setting(bp);
11643 mutex_unlock(&bp->link_lock);
11644 if (rc) {
11645 netdev_warn(bp->dev, "update phy settings retry failed\n");
11646 } else {
11647 bp->link_info.phy_retry = false;
11648 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11649 }
11650 }
90c694bb 11651 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
e2dc9b6e
MC
11652 mutex_lock(&bp->link_lock);
11653 bnxt_get_port_module_status(bp);
11654 mutex_unlock(&bp->link_lock);
90c694bb 11655 }
5a84acbe
SP
11656
11657 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11658 bnxt_tc_flow_stats_work(bp);
11659
ffd77621
MC
11660 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11661 bnxt_chk_missed_irq(bp);
11662
df97b34d
MC
11663 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11664 bnxt_fw_echo_reply(bp);
11665
e2dc9b6e
MC
11666 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
11667 * must be the last functions to be called before exiting.
11668 */
6988bd92
MC
11669 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11670 bnxt_reset(bp, false);
4cebdcec 11671
fc0f1929
MC
11672 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11673 bnxt_reset(bp, true);
11674
8fbf58e1
MC
11675 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11676 bnxt_rx_ring_reset(bp);
11677
657a33c8
VV
11678 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11679 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11680
acfb50e4
VV
11681 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11682 if (!is_bnxt_fw_ok(bp))
11683 bnxt_devlink_health_report(bp,
11684 BNXT_FW_EXCEPTION_SP_EVENT);
11685 }
11686
4cebdcec
MC
11687 smp_mb__before_atomic();
11688 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5
MC
11689}
11690
d1e7925e 11691/* Under rtnl_lock */
98fdbe73
MC
11692int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11693 int tx_xdp)
d1e7925e
MC
11694{
11695 int max_rx, max_tx, tx_sets = 1;
780baad4 11696 int tx_rings_needed, stats;
8f23d638 11697 int rx_rings = rx;
6fc2ffdf 11698 int cp, vnics, rc;
d1e7925e 11699
d1e7925e
MC
11700 if (tcs)
11701 tx_sets = tcs;
11702
11703 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11704 if (rc)
11705 return rc;
11706
11707 if (max_rx < rx)
11708 return -ENOMEM;
11709
5f449249 11710 tx_rings_needed = tx * tx_sets + tx_xdp;
d1e7925e
MC
11711 if (max_tx < tx_rings_needed)
11712 return -ENOMEM;
11713
6fc2ffdf 11714 vnics = 1;
9b3d15e6 11715 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
6fc2ffdf
EW
11716 vnics += rx_rings;
11717
8f23d638
MC
11718 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11719 rx_rings <<= 1;
11720 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
780baad4
VV
11721 stats = cp;
11722 if (BNXT_NEW_RM(bp)) {
11c3ec7b 11723 cp += bnxt_get_ulp_msix_num(bp);
780baad4
VV
11724 stats += bnxt_get_ulp_stat_ctxs(bp);
11725 }
6fc2ffdf 11726 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
780baad4 11727 stats, vnics);
d1e7925e
MC
11728}
11729
17086399
SP
11730static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11731{
11732 if (bp->bar2) {
11733 pci_iounmap(pdev, bp->bar2);
11734 bp->bar2 = NULL;
11735 }
11736
11737 if (bp->bar1) {
11738 pci_iounmap(pdev, bp->bar1);
11739 bp->bar1 = NULL;
11740 }
11741
11742 if (bp->bar0) {
11743 pci_iounmap(pdev, bp->bar0);
11744 bp->bar0 = NULL;
11745 }
11746}
11747
11748static void bnxt_cleanup_pci(struct bnxt *bp)
11749{
11750 bnxt_unmap_bars(bp, bp->pdev);
11751 pci_release_regions(bp->pdev);
f6824308
VV
11752 if (pci_is_enabled(bp->pdev))
11753 pci_disable_device(bp->pdev);
17086399
SP
11754}
11755
18775aa8
MC
11756static void bnxt_init_dflt_coal(struct bnxt *bp)
11757{
11758 struct bnxt_coal *coal;
11759
11760 /* Tick values in micro seconds.
11761 * 1 coal_buf x bufs_per_record = 1 completion record.
11762 */
11763 coal = &bp->rx_coal;
0c2ff8d7 11764 coal->coal_ticks = 10;
18775aa8
MC
11765 coal->coal_bufs = 30;
11766 coal->coal_ticks_irq = 1;
11767 coal->coal_bufs_irq = 2;
05abe4dd 11768 coal->idle_thresh = 50;
18775aa8
MC
11769 coal->bufs_per_record = 2;
11770 coal->budget = 64; /* NAPI budget */
11771
11772 coal = &bp->tx_coal;
11773 coal->coal_ticks = 28;
11774 coal->coal_bufs = 30;
11775 coal->coal_ticks_irq = 2;
11776 coal->coal_bufs_irq = 2;
11777 coal->bufs_per_record = 1;
11778
11779 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11780}
11781
7c380918
MC
11782static int bnxt_fw_init_one_p1(struct bnxt *bp)
11783{
11784 int rc;
11785
11786 bp->fw_cap = 0;
11787 rc = bnxt_hwrm_ver_get(bp);
ba02629f
EP
11788 bnxt_try_map_fw_health_reg(bp);
11789 if (rc) {
b187e4ba
EP
11790 rc = bnxt_try_recover_fw(bp);
11791 if (rc)
11792 return rc;
11793 rc = bnxt_hwrm_ver_get(bp);
87f7ab8d
EP
11794 if (rc)
11795 return rc;
ba02629f 11796 }
7c380918
MC
11797
11798 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
11799 rc = bnxt_alloc_kong_hwrm_resources(bp);
11800 if (rc)
11801 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
11802 }
11803
11804 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
11805 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
11806 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
11807 if (rc)
11808 return rc;
11809 }
4933f675
VV
11810 bnxt_nvm_cfg_ver_get(bp);
11811
7c380918
MC
11812 rc = bnxt_hwrm_func_reset(bp);
11813 if (rc)
11814 return -ENODEV;
11815
11816 bnxt_hwrm_fw_set_time(bp);
11817 return 0;
11818}
11819
11820static int bnxt_fw_init_one_p2(struct bnxt *bp)
11821{
11822 int rc;
11823
11824 /* Get the MAX capabilities for this function */
11825 rc = bnxt_hwrm_func_qcaps(bp);
11826 if (rc) {
11827 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11828 rc);
11829 return -ENODEV;
11830 }
11831
11832 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11833 if (rc)
11834 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11835 rc);
11836
3e9ec2bb
EP
11837 if (bnxt_alloc_fw_health(bp)) {
11838 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11839 } else {
11840 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11841 if (rc)
11842 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11843 rc);
11844 }
07f83d72 11845
2e882468 11846 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
7c380918
MC
11847 if (rc)
11848 return -ENODEV;
11849
11850 bnxt_hwrm_func_qcfg(bp);
11851 bnxt_hwrm_vnic_qcaps(bp);
11852 bnxt_hwrm_port_led_qcaps(bp);
11853 bnxt_ethtool_init(bp);
11854 bnxt_dcb_init(bp);
11855 return 0;
11856}
11857
ba642ab7
MC
11858static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11859{
11860 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11861 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11862 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11863 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11864 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
c66c06c5 11865 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
ba642ab7
MC
11866 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11867 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11868 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11869 }
11870}
11871
11872static void bnxt_set_dflt_rfs(struct bnxt *bp)
11873{
11874 struct net_device *dev = bp->dev;
11875
11876 dev->hw_features &= ~NETIF_F_NTUPLE;
11877 dev->features &= ~NETIF_F_NTUPLE;
11878 bp->flags &= ~BNXT_FLAG_RFS;
11879 if (bnxt_rfs_supported(bp)) {
11880 dev->hw_features |= NETIF_F_NTUPLE;
11881 if (bnxt_rfs_capable(bp)) {
11882 bp->flags |= BNXT_FLAG_RFS;
11883 dev->features |= NETIF_F_NTUPLE;
11884 }
11885 }
11886}
11887
11888static void bnxt_fw_init_one_p3(struct bnxt *bp)
11889{
11890 struct pci_dev *pdev = bp->pdev;
11891
11892 bnxt_set_dflt_rss_hash_type(bp);
11893 bnxt_set_dflt_rfs(bp);
11894
11895 bnxt_get_wol_settings(bp);
11896 if (bp->flags & BNXT_FLAG_WOL_CAP)
11897 device_set_wakeup_enable(&pdev->dev, bp->wol);
11898 else
11899 device_set_wakeup_capable(&pdev->dev, false);
11900
11901 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11902 bnxt_hwrm_coal_params_qcaps(bp);
11903}
11904
0afd6a4e
MC
11905static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11906
ec5d31e3
MC
11907static int bnxt_fw_init_one(struct bnxt *bp)
11908{
11909 int rc;
11910
11911 rc = bnxt_fw_init_one_p1(bp);
11912 if (rc) {
11913 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11914 return rc;
11915 }
11916 rc = bnxt_fw_init_one_p2(bp);
11917 if (rc) {
11918 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11919 return rc;
11920 }
0afd6a4e
MC
11921 rc = bnxt_probe_phy(bp, false);
11922 if (rc)
11923 return rc;
ec5d31e3
MC
11924 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11925 if (rc)
11926 return rc;
937f188c
VV
11927
11928 /* In case fw capabilities have changed, destroy the unneeded
11929 * reporters and create newly capable ones.
11930 */
11931 bnxt_dl_fw_reporters_destroy(bp, false);
11932 bnxt_dl_fw_reporters_create(bp);
ec5d31e3
MC
11933 bnxt_fw_init_one_p3(bp);
11934 return 0;
11935}
11936
cbb51067
MC
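/* Execute one step of the firmware-supplied reset sequence: write the
 * step's value to its register, decoding the register type (PCI config
 * space, GRC window, BAR0 or BAR1) from the register descriptor, then
 * apply the per-step delay after flushing the write with a config read.
 */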
11937static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11938{
11939 struct bnxt_fw_health *fw_health = bp->fw_health;
11940 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11941 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11942 u32 reg_type, reg_off, delay_msecs;
11943
11944 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11945 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11946 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11947 switch (reg_type) {
11948 case BNXT_FW_HEALTH_REG_TYPE_CFG:
11949 pci_write_config_dword(bp->pdev, reg_off, val);
11950 break;
11951 case BNXT_FW_HEALTH_REG_TYPE_GRC:
11952 writel(reg_off & BNXT_GRC_BASE_MASK,
11953 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11954 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
df561f66 11955 fallthrough;
cbb51067
MC
11956 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
11957 writel(val, bp->bar0 + reg_off);
11958 break;
11959 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
11960 writel(val, bp->bar1 + reg_off);
11961 break;
11962 }
11963 if (delay_msecs) {
11964 pci_read_config_dword(bp->pdev, 0, &val);
11965 msleep(delay_msecs);
11966 }
11967}
11968
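/* Reset the firmware using whichever method the chip advertises: a
 * firmware reload via OP-TEE when ERR_RECOVER_RELOAD is supported, the
 * host-driven register write sequence from bnxt_fw_reset_writel(), or a
 * graceful HWRM_FW_RESET request handled by the embedded co-processor.
 */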
11969static void bnxt_reset_all(struct bnxt *bp)
11970{
11971 struct bnxt_fw_health *fw_health = bp->fw_health;
e07ab202
VV
11972 int i, rc;
11973
11974 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
87f7ab8d 11975 bnxt_fw_reset_via_optee(bp);
e07ab202 11976 bp->fw_reset_timestamp = jiffies;
e07ab202
VV
11977 return;
11978 }
cbb51067
MC
11979
11980 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
11981 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
11982 bnxt_fw_reset_writel(bp, i);
11983 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
11984 struct hwrm_fw_reset_input req = {0};
cbb51067
MC
11985
11986 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
11987 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
11988 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
11989 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
11990 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
11991 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
a2f3835c 11992 if (rc != -ENODEV)
cbb51067
MC
11993 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
11994 }
11995 bp->fw_reset_timestamp = jiffies;
11996}
11997
339eeb4b
MC
11998static bool bnxt_fw_reset_timeout(struct bnxt *bp)
11999{
12000 return time_after(jiffies, bp->fw_reset_timestamp +
12001 (bp->fw_reset_max_dsecs * HZ / 10));
12002}
12003
3958b1da
SK
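/* Abandon an in-progress firmware reset: clear the IN_FW_RESET state,
 * restart the ULP driver and update devlink health (unless we were still
 * waiting for VFs to unregister), then close the device.
 */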
12004static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12005{
12006 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12007 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12008 bnxt_ulp_start(bp, rc);
12009 bnxt_dl_health_status_update(bp, false);
12010 }
12011 bp->fw_reset_state = 0;
12012 dev_close(bp->dev);
12013}
12014
230d1f0d
MC
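/* Firmware reset state machine, run from a delayed work item and
 * re-queued until it completes or aborts:
 *   POLL_VF      - wait for all VFs to unregister, then close the nic
 *   POLL_FW_DOWN - wait for firmware to report shutdown (reload method)
 *   RESET_FW     - trigger the reset via bnxt_reset_all()
 *   ENABLE_DEV   - re-enable the PCI device and bus mastering
 *   POLL_FW      - poll HWRM_VER_GET until firmware responds again
 *   OPENING      - reopen the netdev, then restart ULP, SR-IOV and VF reps
 */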
12015static void bnxt_fw_reset_task(struct work_struct *work)
12016{
12017 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
3958b1da 12018 int rc = 0;
230d1f0d
MC
12019
12020 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12021 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12022 return;
12023 }
12024
12025 switch (bp->fw_reset_state) {
e72cb7d6
MC
12026 case BNXT_FW_RESET_STATE_POLL_VF: {
12027 int n = bnxt_get_registered_vfs(bp);
4037eb71 12028 int tmo;
e72cb7d6
MC
12029
12030 if (n < 0) {
230d1f0d 12031 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
e72cb7d6 12032 n, jiffies_to_msecs(jiffies -
230d1f0d
MC
12033 bp->fw_reset_timestamp));
12034 goto fw_reset_abort;
e72cb7d6 12035 } else if (n > 0) {
339eeb4b 12036 if (bnxt_fw_reset_timeout(bp)) {
230d1f0d
MC
12037 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12038 bp->fw_reset_state = 0;
e72cb7d6
MC
12039 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12040 n);
230d1f0d
MC
12041 return;
12042 }
12043 bnxt_queue_fw_reset_work(bp, HZ / 10);
12044 return;
12045 }
12046 bp->fw_reset_timestamp = jiffies;
12047 rtnl_lock();
6cd657cb 12048 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
3958b1da 12049 bnxt_fw_reset_abort(bp, rc);
6cd657cb 12050 rtnl_unlock();
3958b1da 12051 return;
6cd657cb 12052 }
230d1f0d 12053 bnxt_fw_reset_close(bp);
4037eb71
VV
12054 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12055 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12056 tmo = HZ / 10;
12057 } else {
12058 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12059 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12060 }
230d1f0d 12061 rtnl_unlock();
4037eb71 12062 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d 12063 return;
e72cb7d6 12064 }
4037eb71
VV
12065 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12066 u32 val;
12067
12068 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12069 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
339eeb4b 12070 !bnxt_fw_reset_timeout(bp)) {
4037eb71
VV
12071 bnxt_queue_fw_reset_work(bp, HZ / 5);
12072 return;
12073 }
12074
12075 if (!bp->fw_health->master) {
12076 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12077
12078 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12079 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12080 return;
12081 }
12082 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12083 }
df561f66 12084 fallthrough;
c6a9e7aa 12085 case BNXT_FW_RESET_STATE_RESET_FW:
cbb51067
MC
12086 bnxt_reset_all(bp);
12087 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
c6a9e7aa 12088 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
cbb51067 12089 return;
230d1f0d 12090 case BNXT_FW_RESET_STATE_ENABLE_DEV:
43a440c4 12091 bnxt_inv_fw_health_reg(bp);
bae8a003
VV
12092 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12093 !bp->fw_reset_min_dsecs) {
12094 u16 val;
12095
12096 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12097 if (val == 0xffff) {
12098 if (bnxt_fw_reset_timeout(bp)) {
12099 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
3958b1da 12100 rc = -ETIMEDOUT;
bae8a003 12101 goto fw_reset_abort;
dab62e7c 12102 }
bae8a003
VV
12103 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12104 return;
dab62e7c 12105 }
d1db9e16 12106 }
b4fff207 12107 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
230d1f0d
MC
12108 if (pci_enable_device(bp->pdev)) {
12109 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
3958b1da 12110 rc = -ENODEV;
230d1f0d
MC
12111 goto fw_reset_abort;
12112 }
12113 pci_set_master(bp->pdev);
12114 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
df561f66 12115 fallthrough;
230d1f0d
MC
12116 case BNXT_FW_RESET_STATE_POLL_FW:
12117 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12118 rc = __bnxt_hwrm_ver_get(bp, true);
12119 if (rc) {
339eeb4b 12120 if (bnxt_fw_reset_timeout(bp)) {
230d1f0d 12121 netdev_err(bp->dev, "Firmware reset aborted\n");
fc8864e0 12122 goto fw_reset_abort_status;
230d1f0d
MC
12123 }
12124 bnxt_queue_fw_reset_work(bp, HZ / 5);
12125 return;
12126 }
12127 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12128 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
df561f66 12129 fallthrough;
230d1f0d
MC
12130 case BNXT_FW_RESET_STATE_OPENING:
12131 while (!rtnl_trylock()) {
12132 bnxt_queue_fw_reset_work(bp, HZ / 10);
12133 return;
12134 }
12135 rc = bnxt_open(bp->dev);
12136 if (rc) {
3958b1da
SK
12137 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12138 bnxt_fw_reset_abort(bp, rc);
12139 rtnl_unlock();
12140 return;
230d1f0d 12141 }
230d1f0d
MC
12142
12143 bp->fw_reset_state = 0;
12144 /* Make sure fw_reset_state is 0 before clearing the flag */
12145 smp_mb__before_atomic();
12146 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
758684e4
SK
12147 bnxt_ulp_start(bp, 0);
12148 bnxt_reenable_sriov(bp);
ac797ced
SB
12149 bnxt_vf_reps_alloc(bp);
12150 bnxt_vf_reps_open(bp);
737d7a6c 12151 bnxt_dl_health_recovery_done(bp);
e4e38237 12152 bnxt_dl_health_status_update(bp, true);
f3a6d206 12153 rtnl_unlock();
230d1f0d
MC
12154 break;
12155 }
12156 return;
12157
fc8864e0
MC
12158fw_reset_abort_status:
12159 if (bp->fw_health->status_reliable ||
12160 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12161 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12162
12163 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12164 }
230d1f0d 12165fw_reset_abort:
230d1f0d 12166 rtnl_lock();
3958b1da 12167 bnxt_fw_reset_abort(bp, rc);
230d1f0d
MC
12168 rtnl_unlock();
12169}
12170
c0c050c5
MC
12171static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12172{
12173 int rc;
12174 struct bnxt *bp = netdev_priv(dev);
12175
12176 SET_NETDEV_DEV(dev, &pdev->dev);
12177
12178 /* enable device (incl. PCI PM wakeup), and bus-mastering */
12179 rc = pci_enable_device(pdev);
12180 if (rc) {
12181 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12182 goto init_err;
12183 }
12184
12185 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12186 dev_err(&pdev->dev,
12187 "Cannot find PCI device base address, aborting\n");
12188 rc = -ENODEV;
12189 goto init_err_disable;
12190 }
12191
12192 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12193 if (rc) {
12194 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12195 goto init_err_disable;
12196 }
12197
12198 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12199 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12200 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
3383176e 12201 rc = -EIO;
c54bc3ce 12202 goto init_err_release;
c0c050c5
MC
12203 }
12204
12205 pci_set_master(pdev);
12206
12207 bp->dev = dev;
12208 bp->pdev = pdev;
12209
8ae24738
MC
12210 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12211 * determines the BAR size.
12212 */
c0c050c5
MC
12213 bp->bar0 = pci_ioremap_bar(pdev, 0);
12214 if (!bp->bar0) {
12215 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12216 rc = -ENOMEM;
12217 goto init_err_release;
12218 }
12219
c0c050c5
MC
12220 bp->bar2 = pci_ioremap_bar(pdev, 4);
12221 if (!bp->bar2) {
12222 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12223 rc = -ENOMEM;
12224 goto init_err_release;
12225 }
12226
6316ea6d
SB
12227 pci_enable_pcie_error_reporting(pdev);
12228
c0c050c5 12229 INIT_WORK(&bp->sp_task, bnxt_sp_task);
230d1f0d 12230 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
c0c050c5
MC
12231
12232 spin_lock_init(&bp->ntp_fltr_lock);
697197e5
MC
12233#if BITS_PER_LONG == 32
12234 spin_lock_init(&bp->db_lock);
12235#endif
c0c050c5
MC
12236
12237 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12238 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12239
18775aa8 12240 bnxt_init_dflt_coal(bp);
51f30785 12241
e99e88a9 12242 timer_setup(&bp->timer, bnxt_timer, 0);
c0c050c5
MC
12243 bp->current_interval = BNXT_TIMER_INTERVAL;
12244
442a35a5
JK
12245 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12246 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12247
caefe526 12248 clear_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
12249 return 0;
12250
12251init_err_release:
17086399 12252 bnxt_unmap_bars(bp, pdev);
c0c050c5
MC
12253 pci_release_regions(pdev);
12254
12255init_err_disable:
12256 pci_disable_device(pdev);
12257
12258init_err:
12259 return rc;
12260}
12261
12262/* rtnl_lock held */
12263static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12264{
12265 struct sockaddr *addr = p;
1fc2cfd0
JH
12266 struct bnxt *bp = netdev_priv(dev);
12267 int rc = 0;
c0c050c5
MC
12268
12269 if (!is_valid_ether_addr(addr->sa_data))
12270 return -EADDRNOTAVAIL;
12271
c1a7bdff
MC
12272 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12273 return 0;
12274
28ea334b 12275 rc = bnxt_approve_mac(bp, addr->sa_data, true);
84c33dd3
MC
12276 if (rc)
12277 return rc;
bdd4347b 12278
c0c050c5 12279 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1fc2cfd0
JH
12280 if (netif_running(dev)) {
12281 bnxt_close_nic(bp, false, false);
12282 rc = bnxt_open_nic(bp, false, false);
12283 }
c0c050c5 12284
1fc2cfd0 12285 return rc;
c0c050c5
MC
12286}
12287
12288/* rtnl_lock held */
12289static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12290{
12291 struct bnxt *bp = netdev_priv(dev);
12292
c0c050c5 12293 if (netif_running(dev))
a9b952d2 12294 bnxt_close_nic(bp, true, false);
c0c050c5
MC
12295
12296 dev->mtu = new_mtu;
12297 bnxt_set_ring_params(bp);
12298
12299 if (netif_running(dev))
a9b952d2 12300 return bnxt_open_nic(bp, true, false);
c0c050c5
MC
12301
12302 return 0;
12303}
12304
c5e3deb8 12305int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
c0c050c5
MC
12306{
12307 struct bnxt *bp = netdev_priv(dev);
3ffb6a39 12308 bool sh = false;
d1e7925e 12309 int rc;
16e5cc64 12310
c0c050c5 12311 if (tc > bp->max_tc) {
b451c8b6 12312 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
c0c050c5
MC
12313 tc, bp->max_tc);
12314 return -EINVAL;
12315 }
12316
12317 if (netdev_get_num_tc(dev) == tc)
12318 return 0;
12319
3ffb6a39
MC
12320 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12321 sh = true;
12322
98fdbe73
MC
12323 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12324 sh, tc, bp->tx_nr_rings_xdp);
d1e7925e
MC
12325 if (rc)
12326 return rc;
c0c050c5
MC
12327
12328 /* Needs to close the device and do hw resource re-allocations */
12329 if (netif_running(bp->dev))
12330 bnxt_close_nic(bp, true, false);
12331
12332 if (tc) {
12333 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12334 netdev_set_num_tc(dev, tc);
12335 } else {
12336 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12337 netdev_reset_tc(dev);
12338 }
87e9b377 12339 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
3ffb6a39
MC
12340 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12341 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5
MC
12342
12343 if (netif_running(bp->dev))
12344 return bnxt_open_nic(bp, true, false);
12345
12346 return 0;
12347}
12348
9e0fd15d
JP
12349static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12350 void *cb_priv)
c5e3deb8 12351{
9e0fd15d 12352 struct bnxt *bp = cb_priv;
de4784ca 12353
312324f1
JK
12354 if (!bnxt_tc_flower_enabled(bp) ||
12355 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
38cf0426 12356 return -EOPNOTSUPP;
c5e3deb8 12357
9e0fd15d
JP
12358 switch (type) {
12359 case TC_SETUP_CLSFLOWER:
12360 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12361 default:
12362 return -EOPNOTSUPP;
12363 }
12364}
12365
627c89d0 12366LIST_HEAD(bnxt_block_cb_list);
955bcb6e 12367
2ae7408f
SP
12368static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12369 void *type_data)
12370{
4e95bc26
PNA
12371 struct bnxt *bp = netdev_priv(dev);
12372
2ae7408f 12373 switch (type) {
9e0fd15d 12374 case TC_SETUP_BLOCK:
955bcb6e
PNA
12375 return flow_block_cb_setup_simple(type_data,
12376 &bnxt_block_cb_list,
4e95bc26
PNA
12377 bnxt_setup_tc_block_cb,
12378 bp, bp, true);
575ed7d3 12379 case TC_SETUP_QDISC_MQPRIO: {
2ae7408f
SP
12380 struct tc_mqprio_qopt *mqprio = type_data;
12381
12382 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
56f36acd 12383
2ae7408f
SP
12384 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12385 }
12386 default:
12387 return -EOPNOTSUPP;
12388 }
c5e3deb8
MC
12389}
12390
c0c050c5
MC
12391#ifdef CONFIG_RFS_ACCEL
12392static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12393 struct bnxt_ntuple_filter *f2)
12394{
12395 struct flow_keys *keys1 = &f1->fkeys;
12396 struct flow_keys *keys2 = &f2->fkeys;
12397
6fc7caa8
MC
12398 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12399 keys1->basic.ip_proto != keys2->basic.ip_proto)
12400 return false;
12401
12402 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12403 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12404 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12405 return false;
12406 } else {
12407 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12408 sizeof(keys1->addrs.v6addrs.src)) ||
12409 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12410 sizeof(keys1->addrs.v6addrs.dst)))
12411 return false;
12412 }
12413
12414 if (keys1->ports.ports == keys2->ports.ports &&
61aad724 12415 keys1->control.flags == keys2->control.flags &&
a54c4d74
MC
12416 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12417 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
c0c050c5
MC
12418 return true;
12419
12420 return false;
12421}
12422
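/* aRFS flow steering callback (ndo_rx_flow_steer): resolve the
 * destination MAC to a unicast filter index, dissect the flow keys,
 * reject unsupported protocols, and drop flows already present in the
 * ntuple hash table.  A new flow gets a sw_id from the filter bitmap and
 * is handed to the slow-path work (bp->sp_task) for HWRM configuration.
 */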
12423static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12424 u16 rxq_index, u32 flow_id)
12425{
12426 struct bnxt *bp = netdev_priv(dev);
12427 struct bnxt_ntuple_filter *fltr, *new_fltr;
12428 struct flow_keys *fkeys;
12429 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
a54c4d74 12430 int rc = 0, idx, bit_id, l2_idx = 0;
c0c050c5 12431 struct hlist_head *head;
f47d0e19 12432 u32 flags;
c0c050c5 12433
a54c4d74
MC
12434 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12435 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12436 int off = 0, j;
12437
12438 netif_addr_lock_bh(dev);
12439 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12440 if (ether_addr_equal(eth->h_dest,
12441 vnic->uc_list + off)) {
12442 l2_idx = j + 1;
12443 break;
12444 }
12445 }
12446 netif_addr_unlock_bh(dev);
12447 if (!l2_idx)
12448 return -EINVAL;
12449 }
c0c050c5
MC
12450 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12451 if (!new_fltr)
12452 return -ENOMEM;
12453
12454 fkeys = &new_fltr->fkeys;
12455 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12456 rc = -EPROTONOSUPPORT;
12457 goto err_free;
12458 }
12459
dda0e746
MC
12460 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12461 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
c0c050c5
MC
12462 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12463 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12464 rc = -EPROTONOSUPPORT;
12465 goto err_free;
12466 }
dda0e746
MC
12467 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12468 bp->hwrm_spec_code < 0x10601) {
12469 rc = -EPROTONOSUPPORT;
12470 goto err_free;
12471 }
f47d0e19
MC
12472 flags = fkeys->control.flags;
12473 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12474 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
61aad724
MC
12475 rc = -EPROTONOSUPPORT;
12476 goto err_free;
12477 }
c0c050c5 12478
a54c4d74 12479 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
c0c050c5
MC
12480 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12481
12482 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12483 head = &bp->ntp_fltr_hash_tbl[idx];
12484 rcu_read_lock();
12485 hlist_for_each_entry_rcu(fltr, head, hash) {
12486 if (bnxt_fltr_match(fltr, new_fltr)) {
12487 rcu_read_unlock();
12488 rc = 0;
12489 goto err_free;
12490 }
12491 }
12492 rcu_read_unlock();
12493
12494 spin_lock_bh(&bp->ntp_fltr_lock);
84e86b98
MC
12495 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12496 BNXT_NTP_FLTR_MAX_FLTR, 0);
12497 if (bit_id < 0) {
c0c050c5
MC
12498 spin_unlock_bh(&bp->ntp_fltr_lock);
12499 rc = -ENOMEM;
12500 goto err_free;
12501 }
12502
84e86b98 12503 new_fltr->sw_id = (u16)bit_id;
c0c050c5 12504 new_fltr->flow_id = flow_id;
a54c4d74 12505 new_fltr->l2_fltr_idx = l2_idx;
c0c050c5
MC
12506 new_fltr->rxq = rxq_index;
12507 hlist_add_head_rcu(&new_fltr->hash, head);
12508 bp->ntp_fltr_count++;
12509 spin_unlock_bh(&bp->ntp_fltr_lock);
12510
12511 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
c213eae8 12512 bnxt_queue_sp_work(bp);
c0c050c5
MC
12513
12514 return new_fltr->sw_id;
12515
12516err_free:
12517 kfree(new_fltr);
12518 return rc;
12519}
12520
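/* Walk the ntuple filter hash table: install filters not yet configured
 * in hardware, and free filters that failed to install or that RPS
 * reports as expired.
 */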
12521static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12522{
12523 int i;
12524
12525 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12526 struct hlist_head *head;
12527 struct hlist_node *tmp;
12528 struct bnxt_ntuple_filter *fltr;
12529 int rc;
12530
12531 head = &bp->ntp_fltr_hash_tbl[i];
12532 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12533 bool del = false;
12534
12535 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12536 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12537 fltr->flow_id,
12538 fltr->sw_id)) {
12539 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12540 fltr);
12541 del = true;
12542 }
12543 } else {
12544 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12545 fltr);
12546 if (rc)
12547 del = true;
12548 else
12549 set_bit(BNXT_FLTR_VALID, &fltr->state);
12550 }
12551
12552 if (del) {
12553 spin_lock_bh(&bp->ntp_fltr_lock);
12554 hlist_del_rcu(&fltr->hash);
12555 bp->ntp_fltr_count--;
12556 spin_unlock_bh(&bp->ntp_fltr_lock);
12557 synchronize_rcu();
12558 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12559 kfree(fltr);
12560 }
12561 }
12562 }
19241368 12563 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
9a005c38 12564		netdev_info(bp->dev, "Received PF driver unload event!\n");
c0c050c5
MC
12565}
12566
12567#else
12568
12569static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12570{
12571}
12572
12573#endif /* CONFIG_RFS_ACCEL */
12574
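/* udp_tunnel_nic sync callback: table 0 holds the single VXLAN port and
 * table 1 the single GENEVE port.  A non-zero port allocates the
 * firmware tunnel destination port; a zero port frees it.
 */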
442a35a5 12575static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
c0c050c5 12576{
442a35a5
JK
12577 struct bnxt *bp = netdev_priv(netdev);
12578 struct udp_tunnel_info ti;
12579 unsigned int cmd;
c0c050c5 12580
442a35a5 12581 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
1698d600
MC
12582 if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
12583 bp->vxlan_port = ti.port;
442a35a5 12584 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
1698d600
MC
12585 } else {
12586 bp->nge_port = ti.port;
442a35a5 12587 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
1698d600 12588 }
7cdd5fc3 12589
442a35a5
JK
12590 if (ti.port)
12591 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
ad51b8e9 12592
442a35a5 12593 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
c0c050c5
MC
12594}
12595
442a35a5
JK
12596static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12597 .sync_table = bnxt_udp_tunnel_sync,
12598 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12599 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12600 .tables = {
12601 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
12602 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12603 },
12604};
c0c050c5 12605
39d8ba2e
MC
12606static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12607 struct net_device *dev, u32 filter_mask,
12608 int nlflags)
12609{
12610 struct bnxt *bp = netdev_priv(dev);
12611
12612 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12613 nlflags, filter_mask, NULL);
12614}
12615
12616static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
2fd527b7 12617 u16 flags, struct netlink_ext_ack *extack)
39d8ba2e
MC
12618{
12619 struct bnxt *bp = netdev_priv(dev);
12620 struct nlattr *attr, *br_spec;
12621 int rem, rc = 0;
12622
12623 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12624 return -EOPNOTSUPP;
12625
12626 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12627 if (!br_spec)
12628 return -EINVAL;
12629
12630 nla_for_each_nested(attr, br_spec, rem) {
12631 u16 mode;
12632
12633 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12634 continue;
12635
12636 if (nla_len(attr) < sizeof(mode))
12637 return -EINVAL;
12638
12639 mode = nla_get_u16(attr);
12640 if (mode == bp->br_mode)
12641 break;
12642
12643 rc = bnxt_hwrm_set_br_mode(bp, mode);
12644 if (!rc)
12645 bp->br_mode = mode;
12646 break;
12647 }
12648 return rc;
12649}
12650
52d5254a
FF
12651int bnxt_get_port_parent_id(struct net_device *dev,
12652 struct netdev_phys_item_id *ppid)
c124a62f 12653{
52d5254a
FF
12654 struct bnxt *bp = netdev_priv(dev);
12655
c124a62f
SP
12656 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12657 return -EOPNOTSUPP;
12658
12659	/* The PF and its VF-reps only support the switchdev framework */
d061b241 12660 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
c124a62f
SP
12661 return -EOPNOTSUPP;
12662
b014232f
VV
12663 ppid->id_len = sizeof(bp->dsn);
12664 memcpy(ppid->id, bp->dsn, ppid->id_len);
c124a62f 12665
52d5254a 12666 return 0;
c124a62f
SP
12667}
12668
c9c49a65
JP
12669static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12670{
12671 struct bnxt *bp = netdev_priv(dev);
12672
12673 return &bp->dl_port;
12674}
12675
c0c050c5
MC
12676static const struct net_device_ops bnxt_netdev_ops = {
12677 .ndo_open = bnxt_open,
12678 .ndo_start_xmit = bnxt_start_xmit,
12679 .ndo_stop = bnxt_close,
12680 .ndo_get_stats64 = bnxt_get_stats64,
12681 .ndo_set_rx_mode = bnxt_set_rx_mode,
12682 .ndo_do_ioctl = bnxt_ioctl,
12683 .ndo_validate_addr = eth_validate_addr,
12684 .ndo_set_mac_address = bnxt_change_mac_addr,
12685 .ndo_change_mtu = bnxt_change_mtu,
12686 .ndo_fix_features = bnxt_fix_features,
12687 .ndo_set_features = bnxt_set_features,
1698d600 12688 .ndo_features_check = bnxt_features_check,
c0c050c5
MC
12689 .ndo_tx_timeout = bnxt_tx_timeout,
12690#ifdef CONFIG_BNXT_SRIOV
12691 .ndo_get_vf_config = bnxt_get_vf_config,
12692 .ndo_set_vf_mac = bnxt_set_vf_mac,
12693 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
12694 .ndo_set_vf_rate = bnxt_set_vf_bw,
12695 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
12696 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
746df139 12697 .ndo_set_vf_trust = bnxt_set_vf_trust,
c0c050c5
MC
12698#endif
12699 .ndo_setup_tc = bnxt_setup_tc,
12700#ifdef CONFIG_RFS_ACCEL
12701 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
12702#endif
f4e63525 12703 .ndo_bpf = bnxt_xdp,
f18c2b77 12704 .ndo_xdp_xmit = bnxt_xdp_xmit,
39d8ba2e
MC
12705 .ndo_bridge_getlink = bnxt_bridge_getlink,
12706 .ndo_bridge_setlink = bnxt_bridge_setlink,
c9c49a65 12707 .ndo_get_devlink_port = bnxt_get_devlink_port,
c0c050c5
MC
12708};
12709
12710static void bnxt_remove_one(struct pci_dev *pdev)
12711{
12712 struct net_device *dev = pci_get_drvdata(pdev);
12713 struct bnxt *bp = netdev_priv(dev);
12714
7e334fc8 12715 if (BNXT_PF(bp))
c0c050c5
MC
12716 bnxt_sriov_disable(bp);
12717
21d6a11e
VV
12718 if (BNXT_PF(bp))
12719 devlink_port_type_clear(&bp->dl_port);
93cb62d9 12720
21d6a11e
VV
12721 pci_disable_pcie_error_reporting(pdev);
12722 unregister_netdev(dev);
b16939b5 12723 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
21d6a11e 12724 /* Flush any pending tasks */
631ce27a
VV
12725 cancel_work_sync(&bp->sp_task);
12726 cancel_delayed_work_sync(&bp->fw_reset_task);
b16939b5
VV
12727 bp->sp_event = 0;
12728
7e334fc8 12729 bnxt_dl_fw_reporters_destroy(bp, true);
cda2cab0 12730 bnxt_dl_unregister(bp);
2ae7408f 12731 bnxt_shutdown_tc(bp);
c0c050c5 12732
7809592d 12733 bnxt_clear_int_mode(bp);
be58a0da 12734 bnxt_hwrm_func_drv_unrgtr(bp);
c0c050c5 12735 bnxt_free_hwrm_resources(bp);
e605db80 12736 bnxt_free_hwrm_short_cmd_req(bp);
eb513658 12737 bnxt_ethtool_free(bp);
7df4ae9f 12738 bnxt_dcb_free(bp);
a588e458
MC
12739 kfree(bp->edev);
12740 bp->edev = NULL;
ae5c42f0
MC
12741 kfree(bp->ptp_cfg);
12742 bp->ptp_cfg = NULL;
8280b38e
VV
12743 kfree(bp->fw_health);
12744 bp->fw_health = NULL;
c20dc142 12745 bnxt_cleanup_pci(bp);
98f04cf0
MC
12746 bnxt_free_ctx_mem(bp);
12747 kfree(bp->ctx);
12748 bp->ctx = NULL;
1667cbf6
MC
12749 kfree(bp->rss_indir_tbl);
12750 bp->rss_indir_tbl = NULL;
fd3ab1c7 12751 bnxt_free_port_stats(bp);
c0c050c5 12752 free_netdev(dev);
c0c050c5
MC
12753}
12754
ba642ab7 12755static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
c0c050c5
MC
12756{
12757 int rc = 0;
12758 struct bnxt_link_info *link_info = &bp->link_info;
c0c050c5 12759
b0d28207 12760 bp->phy_flags = 0;
170ce013
MC
12761 rc = bnxt_hwrm_phy_qcaps(bp);
12762 if (rc) {
12763 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12764 rc);
12765 return rc;
12766 }
dade5e15
MC
12767 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12768 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12769 else
12770 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
43a5107d
MC
12771 if (!fw_dflt)
12772 return 0;
12773
c0c050c5
MC
12774 rc = bnxt_update_link(bp, false);
12775 if (rc) {
12776 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12777 rc);
12778 return rc;
12779 }
12780
93ed8117
MC
12781 /* Older firmware does not have supported_auto_speeds, so assume
12782 * that all supported speeds can be autonegotiated.
12783 */
12784 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12785 link_info->support_auto_speeds = link_info->support_speeds;
12786
8119e49b 12787 bnxt_init_ethtool_link_settings(bp);
ba642ab7 12788 return 0;
c0c050c5
MC
12789}
12790
12791static int bnxt_get_max_irq(struct pci_dev *pdev)
12792{
12793 u16 ctrl;
12794
12795 if (!pdev->msix_cap)
12796 return 1;
12797
12798 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12799 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12800}
12801
6e6c5a57
MC
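/* Compute the per-function RX/TX/CP ring maxima from the hardware
 * resources, after subtracting the MSI-X vectors and stat contexts
 * reserved for the ULP (RoCE) driver.  On P5 chips the returned *max_cp
 * is the number of available NQs.
 */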
12802static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12803 int *max_cp)
c0c050c5 12804{
6a4f2947 12805 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
e30fbc33 12806 int max_ring_grps = 0, max_irq;
c0c050c5 12807
6a4f2947
MC
12808 *max_tx = hw_resc->max_tx_rings;
12809 *max_rx = hw_resc->max_rx_rings;
e30fbc33
MC
12810 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12811 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12812 bnxt_get_ulp_msix_num(bp),
c027c6b4 12813 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
e30fbc33
MC
12814 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12815 *max_cp = min_t(int, *max_cp, max_irq);
6a4f2947 12816 max_ring_grps = hw_resc->max_hw_ring_grps;
76595193
PS
12817 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12818 *max_cp -= 1;
12819 *max_rx -= 2;
12820 }
c0c050c5
MC
12821 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12822 *max_rx >>= 1;
e30fbc33
MC
12823 if (bp->flags & BNXT_FLAG_CHIP_P5) {
12824 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12825 /* On P5 chips, max_cp output param should be available NQs */
12826 *max_cp = max_irq;
12827 }
b72d4a68 12828 *max_rx = min_t(int, *max_rx, max_ring_grps);
6e6c5a57
MC
12829}
12830
12831int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12832{
12833 int rx, tx, cp;
12834
12835 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
78f058a4
MC
12836 *max_rx = rx;
12837 *max_tx = tx;
6e6c5a57
MC
12838 if (!rx || !tx || !cp)
12839 return -ENOMEM;
12840
6e6c5a57
MC
12841 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12842}
12843
e4060d30
MC
12844static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12845 bool shared)
12846{
12847 int rc;
12848
12849 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
bdbd1eb5
MC
12850 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12851 /* Not enough rings, try disabling agg rings. */
12852 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12853 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
07f4fde5
MC
12854 if (rc) {
12855 /* set BNXT_FLAG_AGG_RINGS back for consistency */
12856 bp->flags |= BNXT_FLAG_AGG_RINGS;
bdbd1eb5 12857 return rc;
07f4fde5 12858 }
bdbd1eb5 12859 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
1054aee8
MC
12860 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12861 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bdbd1eb5
MC
12862 bnxt_set_ring_params(bp);
12863 }
e4060d30
MC
12864
12865 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12866 int max_cp, max_stat, max_irq;
12867
12868 /* Reserve minimum resources for RoCE */
12869 max_cp = bnxt_get_max_func_cp_rings(bp);
12870 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12871 max_irq = bnxt_get_max_func_irqs(bp);
12872 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12873 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12874 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12875 return 0;
12876
12877 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12878 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12879 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12880 max_cp = min_t(int, max_cp, max_irq);
12881 max_cp = min_t(int, max_cp, max_stat);
12882 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12883 if (rc)
12884 rc = 0;
12885 }
12886 return rc;
12887}
12888
58ea801a
MC
12889/* In the initial default shared ring setting, each shared ring must have
12890 * an RX/TX ring pair.
12891 */
12892static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12893{
12894 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12895 bp->rx_nr_rings = bp->cp_nr_rings;
12896 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12897 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12898}
12899
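/* Pick the default ring counts: start from the default RSS queue count
 * (1 in a kdump kernel), scale down on multi-port cards so the total
 * does not exceed the CPU count, clamp to the hardware maxima, then
 * reserve the rings with firmware and trim again if the reservation
 * returned fewer rings than requested.
 */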
702c221c 12900static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
6e6c5a57
MC
12901{
12902 int dflt_rings, max_rx_rings, max_tx_rings, rc;
6e6c5a57 12903
2773dfb2
MC
12904 if (!bnxt_can_reserve_rings(bp))
12905 return 0;
12906
6e6c5a57
MC
12907 if (sh)
12908 bp->flags |= BNXT_FLAG_SHARED_RINGS;
d629522e 12909 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
1d3ef13d
MC
12910 /* Reduce default rings on multi-port cards so that total default
12911 * rings do not exceed CPU count.
12912 */
12913 if (bp->port_count > 1) {
12914 int max_rings =
12915 max_t(int, num_online_cpus() / bp->port_count, 1);
12916
12917 dflt_rings = min_t(int, dflt_rings, max_rings);
12918 }
e4060d30 12919 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6e6c5a57
MC
12920 if (rc)
12921 return rc;
12922 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12923 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
58ea801a
MC
12924 if (sh)
12925 bnxt_trim_dflt_sh_rings(bp);
12926 else
12927 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12928 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
391be5c2 12929
674f50a5 12930 rc = __bnxt_reserve_rings(bp);
391be5c2
MC
12931 if (rc)
12932 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
58ea801a
MC
12933 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12934 if (sh)
12935 bnxt_trim_dflt_sh_rings(bp);
391be5c2 12936
674f50a5
MC
12937 /* Rings may have been trimmed, re-reserve the trimmed rings. */
12938 if (bnxt_need_reserve_rings(bp)) {
12939 rc = __bnxt_reserve_rings(bp);
12940 if (rc)
12941 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
12942 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12943 }
76595193
PS
12944 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12945 bp->rx_nr_rings++;
12946 bp->cp_nr_rings++;
12947 }
5d765a5e
VV
12948 if (rc) {
12949 bp->tx_nr_rings = 0;
12950 bp->rx_nr_rings = 0;
12951 }
6e6c5a57 12952 return rc;
c0c050c5
MC
12953}
12954
47558acd
MC
12955static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
12956{
12957 int rc;
12958
12959 if (bp->tx_nr_rings)
12960 return 0;
12961
6b95c3e9
MC
12962 bnxt_ulp_irq_stop(bp);
12963 bnxt_clear_int_mode(bp);
47558acd
MC
12964 rc = bnxt_set_dflt_rings(bp, true);
12965 if (rc) {
12966 netdev_err(bp->dev, "Not enough rings available.\n");
6b95c3e9 12967 goto init_dflt_ring_err;
47558acd
MC
12968 }
12969 rc = bnxt_init_int_mode(bp);
12970 if (rc)
6b95c3e9
MC
12971 goto init_dflt_ring_err;
12972
47558acd
MC
12973 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12974 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
12975 bp->flags |= BNXT_FLAG_RFS;
12976 bp->dev->features |= NETIF_F_NTUPLE;
12977 }
6b95c3e9
MC
12978init_dflt_ring_err:
12979 bnxt_ulp_irq_restart(bp, rc);
12980 return rc;
47558acd
MC
12981}
12982
80fcaf46 12983int bnxt_restore_pf_fw_resources(struct bnxt *bp)
7b08f661 12984{
80fcaf46
MC
12985 int rc;
12986
7b08f661
MC
12987 ASSERT_RTNL();
12988 bnxt_hwrm_func_qcaps(bp);
1a037782
VD
12989
12990 if (netif_running(bp->dev))
12991 __bnxt_close_nic(bp, true, false);
12992
ec86f14e 12993 bnxt_ulp_irq_stop(bp);
80fcaf46
MC
12994 bnxt_clear_int_mode(bp);
12995 rc = bnxt_init_int_mode(bp);
ec86f14e 12996 bnxt_ulp_irq_restart(bp, rc);
1a037782
VD
12997
12998 if (netif_running(bp->dev)) {
12999 if (rc)
13000 dev_close(bp->dev);
13001 else
13002 rc = bnxt_open_nic(bp, true, false);
13003 }
13004
80fcaf46 13005 return rc;
7b08f661
MC
13006}
13007
a22a6ac2
MC
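/* Set the initial MAC address: the PF uses the address assigned by
 * firmware; a VF uses the admin-assigned address if valid (with relaxed
 * approval, since an older PF driver or firmware may not approve it
 * correctly) or else a random address, and asks the PF to approve it.
 */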
13008static int bnxt_init_mac_addr(struct bnxt *bp)
13009{
13010 int rc = 0;
13011
13012 if (BNXT_PF(bp)) {
13013 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
13014 } else {
13015#ifdef CONFIG_BNXT_SRIOV
13016 struct bnxt_vf_info *vf = &bp->vf;
28ea334b 13017 bool strict_approval = true;
a22a6ac2
MC
13018
13019 if (is_valid_ether_addr(vf->mac_addr)) {
91cdda40 13020 /* overwrite netdev dev_addr with admin VF MAC */
a22a6ac2 13021 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
28ea334b
MC
13022 /* Older PF driver or firmware may not approve this
13023 * correctly.
13024 */
13025 strict_approval = false;
a22a6ac2
MC
13026 } else {
13027 eth_hw_addr_random(bp->dev);
a22a6ac2 13028 }
28ea334b 13029 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
a22a6ac2
MC
13030#endif
13031 }
13032 return rc;
13033}
13034
a0d0fd70
VV
13035#define BNXT_VPD_LEN 512
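/* Read up to BNXT_VPD_LEN bytes of PCI VPD and copy the part number and
 * serial number keywords from the read-only section into
 * bp->board_partno and bp->board_serialno.
 */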
13036static void bnxt_vpd_read_info(struct bnxt *bp)
13037{
13038 struct pci_dev *pdev = bp->pdev;
492adcf4 13039 int i, len, pos, ro_size, size;
a0d0fd70
VV
13040 ssize_t vpd_size;
13041 u8 *vpd_data;
13042
13043 vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
13044 if (!vpd_data)
13045 return;
13046
13047 vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
13048 if (vpd_size <= 0) {
13049 netdev_err(bp->dev, "Unable to read VPD\n");
13050 goto exit;
13051 }
13052
4cf0abbc 13053 i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
a0d0fd70
VV
13054 if (i < 0) {
13055 netdev_err(bp->dev, "VPD READ-Only not found\n");
13056 goto exit;
13057 }
13058
13059 ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
13060 i += PCI_VPD_LRDT_TAG_SIZE;
13061 if (i + ro_size > vpd_size)
13062 goto exit;
13063
13064 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13065 PCI_VPD_RO_KEYWORD_PARTNO);
13066 if (pos < 0)
13067 goto read_sn;
13068
13069 len = pci_vpd_info_field_size(&vpd_data[pos]);
13070 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13071 if (len + pos > vpd_size)
13072 goto read_sn;
13073
492adcf4
VV
13074 size = min(len, BNXT_VPD_FLD_LEN - 1);
13075 memcpy(bp->board_partno, &vpd_data[pos], size);
a0d0fd70
VV
13076
13077read_sn:
13078 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13079 PCI_VPD_RO_KEYWORD_SERIALNO);
13080 if (pos < 0)
13081 goto exit;
13082
13083 len = pci_vpd_info_field_size(&vpd_data[pos]);
13084 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13085 if (len + pos > vpd_size)
13086 goto exit;
13087
492adcf4
VV
13088 size = min(len, BNXT_VPD_FLD_LEN - 1);
13089 memcpy(bp->board_serialno, &vpd_data[pos], size);
a0d0fd70
VV
13090exit:
13091 kfree(vpd_data);
13092}
13093
03213a99
JP
13094static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13095{
13096 struct pci_dev *pdev = bp->pdev;
8d85b75b 13097 u64 qword;
03213a99 13098
8d85b75b
JK
13099 qword = pci_get_dsn(pdev);
13100 if (!qword) {
13101 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
03213a99
JP
13102 return -EOPNOTSUPP;
13103 }
13104
8d85b75b
JK
13105 put_unaligned_le64(qword, dsn);
13106
d061b241 13107 bp->flags |= BNXT_FLAG_DSN_VALID;
03213a99
JP
13108 return 0;
13109}
13110
8ae24738
MC
13111static int bnxt_map_db_bar(struct bnxt *bp)
13112{
13113 if (!bp->db_size)
13114 return -ENODEV;
13115 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13116 if (!bp->bar1)
13117 return -ENOMEM;
13118 return 0;
13119}
13120
c0c050c5
MC
13121static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13122{
c0c050c5
MC
13123 struct net_device *dev;
13124 struct bnxt *bp;
6e6c5a57 13125 int rc, max_irqs;
c0c050c5 13126
4e00338a 13127 if (pci_is_bridge(pdev))
fa853dda
PS
13128 return -ENODEV;
13129
8743db4a
VV
13130	/* Clear any DMA transactions left pending by the crashed kernel
13131	 * while loading the driver in the capture (kdump) kernel.
13132 */
13133 if (is_kdump_kernel()) {
13134 pci_clear_master(pdev);
13135 pcie_flr(pdev);
13136 }
13137
c0c050c5
MC
13138 max_irqs = bnxt_get_max_irq(pdev);
13139 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13140 if (!dev)
13141 return -ENOMEM;
13142
13143 bp = netdev_priv(dev);
8fb35cd3 13144 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
9c1fabdf 13145 bnxt_set_max_func_irqs(bp, max_irqs);
c0c050c5
MC
13146
13147 if (bnxt_vf_pciid(ent->driver_data))
13148 bp->flags |= BNXT_FLAG_VF;
13149
2bcfa6f6 13150 if (pdev->msix_cap)
c0c050c5 13151 bp->flags |= BNXT_FLAG_MSIX_CAP;
c0c050c5
MC
13152
13153 rc = bnxt_init_board(pdev, dev);
13154 if (rc < 0)
13155 goto init_err_free;
13156
13157 dev->netdev_ops = &bnxt_netdev_ops;
13158 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13159 dev->ethtool_ops = &bnxt_ethtool_ops;
c0c050c5
MC
13160 pci_set_drvdata(pdev, dev);
13161
3e8060fa
PS
13162 rc = bnxt_alloc_hwrm_resources(bp);
13163 if (rc)
17086399 13164 goto init_err_pci_clean;
3e8060fa
PS
13165
13166 mutex_init(&bp->hwrm_cmd_lock);
ba642ab7 13167 mutex_init(&bp->link_lock);
7c380918
MC
13168
13169 rc = bnxt_fw_init_one_p1(bp);
3e8060fa 13170 if (rc)
17086399 13171 goto init_err_pci_clean;
3e8060fa 13172
3e3c09b0
VV
13173 if (BNXT_PF(bp))
13174 bnxt_vpd_read_info(bp);
13175
9d6b648c 13176 if (BNXT_CHIP_P5(bp)) {
e38287b7 13177 bp->flags |= BNXT_FLAG_CHIP_P5;
9d6b648c
MC
13178 if (BNXT_CHIP_SR2(bp))
13179 bp->flags |= BNXT_FLAG_CHIP_SR2;
13180 }
e38287b7 13181
5fa65524
EP
13182 rc = bnxt_alloc_rss_indir_tbl(bp);
13183 if (rc)
13184 goto init_err_pci_clean;
13185
7c380918 13186 rc = bnxt_fw_init_one_p2(bp);
3c2217a6
MC
13187 if (rc)
13188 goto init_err_pci_clean;
13189
8ae24738
MC
13190 rc = bnxt_map_db_bar(bp);
13191 if (rc) {
13192 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13193 rc);
13194 goto init_err_pci_clean;
13195 }
13196
c0c050c5
MC
13197 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13198 NETIF_F_TSO | NETIF_F_TSO6 |
13199 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7e13318d 13200 NETIF_F_GSO_IPXIP4 |
152971ee
AD
13201 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13202 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
3e8060fa
PS
13203 NETIF_F_RXCSUM | NETIF_F_GRO;
13204
e38287b7 13205 if (BNXT_SUPPORTS_TPA(bp))
3e8060fa 13206 dev->hw_features |= NETIF_F_LRO;
c0c050c5 13207
c0c050c5
MC
13208 dev->hw_enc_features =
13209 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13210 NETIF_F_TSO | NETIF_F_TSO6 |
13211 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
152971ee 13212 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7e13318d 13213 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
442a35a5
JK
13214 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13215
152971ee
AD
13216 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13217 NETIF_F_GSO_GRE_CSUM;
c0c050c5 13218 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
1da63ddd
EP
13219 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13220 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13221 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13222 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
e38287b7 13223 if (BNXT_SUPPORTS_TPA(bp))
1054aee8 13224 dev->hw_features |= NETIF_F_GRO_HW;
c0c050c5 13225 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
1054aee8
MC
13226 if (dev->features & NETIF_F_GRO_HW)
13227 dev->features &= ~NETIF_F_LRO;
c0c050c5
MC
13228 dev->priv_flags |= IFF_UNICAST_FLT;
13229
13230#ifdef CONFIG_BNXT_SRIOV
13231 init_waitqueue_head(&bp->sriov_cfg_wait);
4ab0c6a8 13232 mutex_init(&bp->sriov_lock);
c0c050c5 13233#endif
e38287b7
MC
13234 if (BNXT_SUPPORTS_TPA(bp)) {
13235 bp->gro_func = bnxt_gro_func_5730x;
67912c36 13236 if (BNXT_CHIP_P4(bp))
e38287b7 13237 bp->gro_func = bnxt_gro_func_5731x;
67912c36
MC
13238 else if (BNXT_CHIP_P5(bp))
13239 bp->gro_func = bnxt_gro_func_5750x;
e38287b7
MC
13240 }
13241 if (!BNXT_CHIP_P4_PLUS(bp))
434c975a 13242 bp->flags |= BNXT_FLAG_DOUBLE_DB;
309369c9 13243
a22a6ac2
MC
13244 rc = bnxt_init_mac_addr(bp);
13245 if (rc) {
13246 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13247 rc = -EADDRNOTAVAIL;
13248 goto init_err_pci_clean;
13249 }
c0c050c5 13250
2e9217d1
VV
13251 if (BNXT_PF(bp)) {
13252 /* Read the adapter's DSN to use as the eswitch switch_id */
b014232f 13253 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
2e9217d1 13254 }
567b2abe 13255
7eb9bb3a
MC
13256 /* MTU range: 60 - FW defined max */
13257 dev->min_mtu = ETH_ZLEN;
13258 dev->max_mtu = bp->max_mtu;
13259
ba642ab7 13260 rc = bnxt_probe_phy(bp, true);
d5430d31
MC
13261 if (rc)
13262 goto init_err_pci_clean;
13263
c61fb99c 13264 bnxt_set_rx_skb_mode(bp, false);
c0c050c5
MC
13265 bnxt_set_tpa_flags(bp);
13266 bnxt_set_ring_params(bp);
702c221c 13267 rc = bnxt_set_dflt_rings(bp, true);
bdbd1eb5
MC
13268 if (rc) {
13269 netdev_err(bp->dev, "Not enough rings available.\n");
13270 rc = -ENOMEM;
17086399 13271 goto init_err_pci_clean;
bdbd1eb5 13272 }
c0c050c5 13273
ba642ab7 13274 bnxt_fw_init_one_p3(bp);
2bcfa6f6 13275
a196e96b 13276 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
c0c050c5
MC
13277 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13278
7809592d 13279 rc = bnxt_init_int_mode(bp);
c0c050c5 13280 if (rc)
17086399 13281 goto init_err_pci_clean;
c0c050c5 13282
832aed16
MC
13283 /* No TC has been set yet and rings may have been trimmed due to
13284 * limited MSIX, so we re-initialize the TX rings per TC.
13285 */
13286 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13287
c213eae8
MC
13288 if (BNXT_PF(bp)) {
13289 if (!bnxt_pf_wq) {
13290 bnxt_pf_wq =
13291 create_singlethread_workqueue("bnxt_pf_wq");
13292 if (!bnxt_pf_wq) {
13293 dev_err(&pdev->dev, "Unable to create workqueue.\n");
b5f796b6 13294 rc = -ENOMEM;
c213eae8
MC
13295 goto init_err_pci_clean;
13296 }
13297 }
18c7015c
JK
13298 rc = bnxt_init_tc(bp);
13299 if (rc)
13300 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13301 rc);
c213eae8 13302 }
2ae7408f 13303
190eda1a 13304 bnxt_inv_fw_health_reg(bp);
cda2cab0
VV
13305 bnxt_dl_register(bp);
13306
7809592d
MC
13307 rc = register_netdev(dev);
13308 if (rc)
cda2cab0 13309 goto init_err_cleanup;
7809592d 13310
cda2cab0
VV
13311 if (BNXT_PF(bp))
13312 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
7e334fc8 13313 bnxt_dl_fw_reporters_create(bp);
4ab0c6a8 13314
c0c050c5
MC
13315 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13316 board_info[ent->driver_data].name,
13317 (long)pci_resource_start(pdev, 0), dev->dev_addr);
af125b75 13318 pcie_print_link_status(pdev);
90c4f788 13319
df3875ec 13320 pci_save_state(pdev);
c0c050c5
MC
13321 return 0;
13322
cda2cab0
VV
13323init_err_cleanup:
13324 bnxt_dl_unregister(bp);
2ae7408f 13325 bnxt_shutdown_tc(bp);
7809592d
MC
13326 bnxt_clear_int_mode(bp);
13327
17086399 13328init_err_pci_clean:
bdb38602 13329 bnxt_hwrm_func_drv_unrgtr(bp);
f9099d61 13330 bnxt_free_hwrm_short_cmd_req(bp);
a2bf74f4 13331 bnxt_free_hwrm_resources(bp);
03400aaa 13332 bnxt_ethtool_free(bp);
ae5c42f0
MC
13333 kfree(bp->ptp_cfg);
13334 bp->ptp_cfg = NULL;
07f83d72
MC
13335 kfree(bp->fw_health);
13336 bp->fw_health = NULL;
17086399 13337 bnxt_cleanup_pci(bp);
62bfb932
MC
13338 bnxt_free_ctx_mem(bp);
13339 kfree(bp->ctx);
13340 bp->ctx = NULL;
1667cbf6
MC
13341 kfree(bp->rss_indir_tbl);
13342 bp->rss_indir_tbl = NULL;
c0c050c5
MC
13343
13344init_err_free:
13345 free_netdev(dev);
13346 return rc;
13347}
13348
d196ece7
MC
13349static void bnxt_shutdown(struct pci_dev *pdev)
13350{
13351 struct net_device *dev = pci_get_drvdata(pdev);
13352 struct bnxt *bp;
13353
13354 if (!dev)
13355 return;
13356
13357 rtnl_lock();
13358 bp = netdev_priv(dev);
13359 if (!bp)
13360 goto shutdown_exit;
13361
13362 if (netif_running(dev))
13363 dev_close(dev);
13364
a7f3f939 13365 bnxt_ulp_shutdown(bp);
5567ae4a
VV
13366 bnxt_clear_int_mode(bp);
13367 pci_disable_device(pdev);
a7f3f939 13368
d196ece7 13369 if (system_state == SYSTEM_POWER_OFF) {
d196ece7
MC
13370 pci_wake_from_d3(pdev, bp->wol);
13371 pci_set_power_state(pdev, PCI_D3hot);
13372 }
13373
13374shutdown_exit:
13375 rtnl_unlock();
13376}
13377
f65a2044
MC
13378#ifdef CONFIG_PM_SLEEP
13379static int bnxt_suspend(struct device *device)
13380{
f521eaa9 13381 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
13382 struct bnxt *bp = netdev_priv(dev);
13383 int rc = 0;
13384
13385 rtnl_lock();
6a68749d 13386 bnxt_ulp_stop(bp);
f65a2044
MC
13387 if (netif_running(dev)) {
13388 netif_device_detach(dev);
13389 rc = bnxt_close(dev);
13390 }
13391 bnxt_hwrm_func_drv_unrgtr(bp);
ef02af8c 13392 pci_disable_device(bp->pdev);
f9b69d7f
VV
13393 bnxt_free_ctx_mem(bp);
13394 kfree(bp->ctx);
13395 bp->ctx = NULL;
f65a2044
MC
13396 rtnl_unlock();
13397 return rc;
13398}
13399
13400static int bnxt_resume(struct device *device)
13401{
f521eaa9 13402 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
13403 struct bnxt *bp = netdev_priv(dev);
13404 int rc = 0;
13405
13406 rtnl_lock();
ef02af8c
MC
13407 rc = pci_enable_device(bp->pdev);
13408 if (rc) {
13409 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13410 rc);
13411 goto resume_exit;
13412 }
13413 pci_set_master(bp->pdev);
f92335d8 13414 if (bnxt_hwrm_ver_get(bp)) {
f65a2044
MC
13415 rc = -ENODEV;
13416 goto resume_exit;
13417 }
13418 rc = bnxt_hwrm_func_reset(bp);
13419 if (rc) {
13420 rc = -EBUSY;
13421 goto resume_exit;
13422 }
f92335d8 13423
2084ccf6
MC
13424 rc = bnxt_hwrm_func_qcaps(bp);
13425 if (rc)
f9b69d7f 13426 goto resume_exit;
f92335d8
VV
13427
13428 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13429 rc = -ENODEV;
13430 goto resume_exit;
13431 }
13432
f65a2044
MC
13433 bnxt_get_wol_settings(bp);
13434 if (netif_running(dev)) {
13435 rc = bnxt_open(dev);
13436 if (!rc)
13437 netif_device_attach(dev);
13438 }
13439
13440resume_exit:
6a68749d 13441 bnxt_ulp_start(bp, rc);
59ae2101
MC
13442 if (!rc)
13443 bnxt_reenable_sriov(bp);
f65a2044
MC
13444 rtnl_unlock();
13445 return rc;
13446}
13447
13448static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13449#define BNXT_PM_OPS (&bnxt_pm_ops)
13450
13451#else
13452
13453#define BNXT_PM_OPS NULL
13454
13455#endif /* CONFIG_PM_SLEEP */
13456
6316ea6d
SB
13457/**
13458 * bnxt_io_error_detected - called when PCI error is detected
13459 * @pdev: Pointer to PCI device
13460 * @state: The current pci connection state
13461 *
13462 * This function is called after a PCI bus error affecting
13463 * this device has been detected.
13464 */
13465static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13466 pci_channel_state_t state)
13467{
13468 struct net_device *netdev = pci_get_drvdata(pdev);
a588e458 13469 struct bnxt *bp = netdev_priv(netdev);
6316ea6d
SB
13470
13471 netdev_info(netdev, "PCI I/O error detected\n");
13472
13473 rtnl_lock();
13474 netif_device_detach(netdev);
13475
a588e458
MC
13476 bnxt_ulp_stop(bp);
13477
6316ea6d
SB
13478 if (state == pci_channel_io_perm_failure) {
13479 rtnl_unlock();
13480 return PCI_ERS_RESULT_DISCONNECT;
13481 }
13482
f75d9a0a
VV
13483 if (state == pci_channel_io_frozen)
13484 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13485
6316ea6d
SB
13486 if (netif_running(netdev))
13487 bnxt_close(netdev);
13488
c81cfb62
KA
13489 if (pci_is_enabled(pdev))
13490 pci_disable_device(pdev);
6e2f8388
MC
13491 bnxt_free_ctx_mem(bp);
13492 kfree(bp->ctx);
13493 bp->ctx = NULL;
6316ea6d
SB
13494 rtnl_unlock();
13495
13496	/* Request a slot reset. */
13497 return PCI_ERS_RESULT_NEED_RESET;
13498}
13499
13500/**
13501 * bnxt_io_slot_reset - called after the pci bus has been reset.
13502 * @pdev: Pointer to PCI device
13503 *
13504 * Restart the card from scratch, as if from a cold-boot.
13505 * At this point, the card has exprienced a hard reset,
13506 * followed by fixups by BIOS, and has its config space
13507 * set up identically to what it was at cold boot.
13508 */
13509static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13510{
fb1e6e56 13511 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
6316ea6d
SB
13512 struct net_device *netdev = pci_get_drvdata(pdev);
13513 struct bnxt *bp = netdev_priv(netdev);
f75d9a0a 13514 int err = 0, off;
6316ea6d
SB
13515
13516 netdev_info(bp->dev, "PCI Slot Reset\n");
13517
13518 rtnl_lock();
13519
13520 if (pci_enable_device(pdev)) {
13521 dev_err(&pdev->dev,
13522 "Cannot re-enable PCI device after reset.\n");
13523 } else {
13524 pci_set_master(pdev);
f75d9a0a
VV
13525		/* Upon a fatal error, the device's internal logic that latches the
13526		 * BAR values is reset and is restored only when the BARs are
13527		 * rewritten.
13528		 *
13529		 * Since pci_restore_state() does not rewrite the BARs when their
13530		 * values match the previously saved values, the driver must write
13531		 * the BARs to 0 to force a restore after a fatal error.
13532 */
13533 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13534 &bp->state)) {
13535 for (off = PCI_BASE_ADDRESS_0;
13536 off <= PCI_BASE_ADDRESS_5; off += 4)
13537 pci_write_config_dword(bp->pdev, off, 0);
13538 }
df3875ec
VV
13539 pci_restore_state(pdev);
13540 pci_save_state(pdev);
6316ea6d 13541
aa8ed021 13542 err = bnxt_hwrm_func_reset(bp);
fb1e6e56 13543 if (!err)
6e2f8388 13544 result = PCI_ERS_RESULT_RECOVERED;
bae361c5 13545 }
6316ea6d
SB
13546
13547 rtnl_unlock();
13548
bae361c5 13549 return result;
6316ea6d
SB
13550}
13551
13552/**
13553 * bnxt_io_resume - called when traffic can start flowing again.
13554 * @pdev: Pointer to PCI device
13555 *
13556 * This callback is called when the error recovery driver tells
13557 * us that it's OK to resume normal operation.
13558 */
13559static void bnxt_io_resume(struct pci_dev *pdev)
13560{
13561 struct net_device *netdev = pci_get_drvdata(pdev);
fb1e6e56
VV
13562 struct bnxt *bp = netdev_priv(netdev);
13563 int err;
6316ea6d 13564
fb1e6e56 13565 netdev_info(bp->dev, "PCI Slot Resume\n");
6316ea6d
SB
13566 rtnl_lock();
13567
fb1e6e56
VV
13568 err = bnxt_hwrm_func_qcaps(bp);
13569 if (!err && netif_running(netdev))
13570 err = bnxt_open(netdev);
13571
13572 bnxt_ulp_start(bp, err);
13573 if (!err) {
13574 bnxt_reenable_sriov(bp);
13575 netif_device_attach(netdev);
13576 }
6316ea6d
SB
13577
13578 rtnl_unlock();
13579}
13580
13581static const struct pci_error_handlers bnxt_err_handler = {
13582 .error_detected = bnxt_io_error_detected,
13583 .slot_reset = bnxt_io_slot_reset,
13584 .resume = bnxt_io_resume
13585};
13586
c0c050c5
MC
13587static struct pci_driver bnxt_pci_driver = {
13588 .name = DRV_MODULE_NAME,
13589 .id_table = bnxt_pci_tbl,
13590 .probe = bnxt_init_one,
13591 .remove = bnxt_remove_one,
d196ece7 13592 .shutdown = bnxt_shutdown,
f65a2044 13593 .driver.pm = BNXT_PM_OPS,
6316ea6d 13594 .err_handler = &bnxt_err_handler,
c0c050c5
MC
13595#if defined(CONFIG_BNXT_SRIOV)
13596 .sriov_configure = bnxt_sriov_configure,
13597#endif
13598};
13599
c213eae8
MC
13600static int __init bnxt_init(void)
13601{
cabfb09d 13602 bnxt_debug_init();
c213eae8
MC
13603 return pci_register_driver(&bnxt_pci_driver);
13604}
13605
13606static void __exit bnxt_exit(void)
13607{
13608 pci_unregister_driver(&bnxt_pci_driver);
13609 if (bnxt_pf_wq)
13610 destroy_workqueue(bnxt_pf_wq);
cabfb09d 13611 bnxt_debug_exit();
c213eae8
MC
13612}
13613
13614module_init(bnxt_init);
13615module_exit(bnxt_exit);