/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM57504,
	BCM57502,
	BCM57508_NPAR,
	BCM57504_NPAR,
	BCM57502_NPAR,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
	NETXTREME_C_VF_HV,
	NETXTREME_E_VF_HV,
	NETXTREME_E_P5_VF,
	NETXTREME_E_P5_VF_HV,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)	\
	writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)	\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)	\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)	\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)	\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

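/* Doorbell helpers below: P5 chips ring the 64-bit notification queue (NQ)
 * doorbells with writeq(), while older chips use the legacy 32-bit
 * completion ring doorbells defined above.
 */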
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

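/* Maps the skb length (in 512-byte units) to the TX BD length hint flags
 * applied in bnxt_start_xmit().
 */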
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

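/* Main transmit routine.  Small packets that fit within the push threshold
 * are copied into the push buffer and written straight through the doorbell
 * region ("TX push"); all other packets are DMA-mapped and described with
 * long TX BDs, with optional TSO, checksum, VLAN and PTP timestamp flags.
 */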
368static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
369{
370 struct bnxt *bp = netdev_priv(dev);
371 struct tx_bd *txbd;
372 struct tx_bd_ext *txbd1;
373 struct netdev_queue *txq;
374 int i;
375 dma_addr_t mapping;
376 unsigned int length, pad = 0;
377 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
378 u16 prod, last_frag;
379 struct pci_dev *pdev = bp->pdev;
c0c050c5
MC
380 struct bnxt_tx_ring_info *txr;
381 struct bnxt_sw_tx_bd *tx_buf;
dade5e15 382 __le32 lflags = 0;
c0c050c5
MC
383
384 i = skb_get_queue_mapping(skb);
385 if (unlikely(i >= bp->tx_nr_rings)) {
386 dev_kfree_skb_any(skb);
387 return NETDEV_TX_OK;
388 }
389
c0c050c5 390 txq = netdev_get_tx_queue(dev, i);
a960dec9 391 txr = &bp->tx_ring[bp->tx_ring_map[i]];
c0c050c5
MC
392 prod = txr->tx_prod;
393
394 free_size = bnxt_tx_avail(bp, txr);
395 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
396 netif_tx_stop_queue(txq);
397 return NETDEV_TX_BUSY;
398 }
399
400 length = skb->len;
401 len = skb_headlen(skb);
402 last_frag = skb_shinfo(skb)->nr_frags;
403
404 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
405
406 txbd->tx_bd_opaque = prod;
407
408 tx_buf = &txr->tx_buf_ring[prod];
409 tx_buf->skb = skb;
410 tx_buf->nr_frags = last_frag;
411
412 vlan_tag_flags = 0;
ee5c7fb3 413 cfa_action = bnxt_xmit_get_cfa_action(skb);
c0c050c5
MC
414 if (skb_vlan_tag_present(skb)) {
415 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
416 skb_vlan_tag_get(skb);
417 /* Currently supports 8021Q, 8021AD vlan offloads
418 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
419 */
420 if (skb->vlan_proto == htons(ETH_P_8021Q))
421 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
422 }
423
83bb623c
PC
424 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
425 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
426
427 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
428 atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
429 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid)) {
430 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
431 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
432 } else {
433 atomic_inc(&bp->ptp_cfg->tx_avail);
434 }
435 }
dade5e15
MC
436 }
437
83bb623c
PC
438 if (unlikely(skb->no_fcs))
439 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
440
441 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
442 !lflags) {
4419dbe6
MC
443 struct tx_push_buffer *tx_push_buf = txr->tx_push;
444 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
445 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
697197e5 446 void __iomem *db = txr->tx_db.doorbell;
4419dbe6
MC
447 void *pdata = tx_push_buf->data;
448 u64 *end;
449 int j, push_len;
c0c050c5
MC
450
451 /* Set COAL_NOW to be ready quickly for the next push */
452 tx_push->tx_bd_len_flags_type =
453 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
454 TX_BD_TYPE_LONG_TX_BD |
455 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
456 TX_BD_FLAGS_COAL_NOW |
457 TX_BD_FLAGS_PACKET_END |
458 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
459
460 if (skb->ip_summed == CHECKSUM_PARTIAL)
461 tx_push1->tx_bd_hsize_lflags =
462 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
463 else
464 tx_push1->tx_bd_hsize_lflags = 0;
465
466 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
ee5c7fb3
SP
467 tx_push1->tx_bd_cfa_action =
468 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
c0c050c5 469
fbb0fa8b
MC
470 end = pdata + length;
471 end = PTR_ALIGN(end, 8) - 1;
4419dbe6
MC
472 *end = 0;
473
c0c050c5
MC
474 skb_copy_from_linear_data(skb, pdata, len);
475 pdata += len;
476 for (j = 0; j < last_frag; j++) {
477 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
478 void *fptr;
479
480 fptr = skb_frag_address_safe(frag);
481 if (!fptr)
482 goto normal_tx;
483
484 memcpy(pdata, fptr, skb_frag_size(frag));
485 pdata += skb_frag_size(frag);
486 }
487
4419dbe6
MC
488 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
489 txbd->tx_bd_haddr = txr->data_mapping;
c0c050c5
MC
490 prod = NEXT_TX(prod);
491 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
492 memcpy(txbd, tx_push1, sizeof(*txbd));
493 prod = NEXT_TX(prod);
4419dbe6 494 tx_push->doorbell =
c0c050c5
MC
495 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
496 txr->tx_prod = prod;
497
b9a8460a 498 tx_buf->is_push = 1;
c0c050c5 499 netdev_tx_sent_queue(txq, skb->len);
b9a8460a 500 wmb(); /* Sync is_push and byte queue before pushing data */
c0c050c5 501
4419dbe6
MC
502 push_len = (length + sizeof(*tx_push) + 7) / 8;
503 if (push_len > 16) {
697197e5
MC
504 __iowrite64_copy(db, tx_push_buf, 16);
505 __iowrite32_copy(db + 4, tx_push_buf + 1,
9d13744b 506 (push_len - 16) << 1);
4419dbe6 507 } else {
697197e5 508 __iowrite64_copy(db, tx_push_buf, push_len);
4419dbe6 509 }
c0c050c5 510
c0c050c5
MC
511 goto tx_done;
512 }
513
514normal_tx:
515 if (length < BNXT_MIN_PKT_SIZE) {
516 pad = BNXT_MIN_PKT_SIZE - length;
517 if (skb_pad(skb, pad)) {
518 /* SKB already freed. */
519 tx_buf->skb = NULL;
520 return NETDEV_TX_OK;
521 }
522 length = BNXT_MIN_PKT_SIZE;
523 }
524
525 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
526
527 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
528 dev_kfree_skb_any(skb);
529 tx_buf->skb = NULL;
530 return NETDEV_TX_OK;
531 }
532
533 dma_unmap_addr_set(tx_buf, mapping, mapping);
534 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
535 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
536
537 txbd->tx_bd_haddr = cpu_to_le64(mapping);
538
539 prod = NEXT_TX(prod);
540 txbd1 = (struct tx_bd_ext *)
541 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
542
dade5e15 543 txbd1->tx_bd_hsize_lflags = lflags;
c0c050c5
MC
544 if (skb_is_gso(skb)) {
545 u32 hdr_len;
546
547 if (skb->encapsulation)
548 hdr_len = skb_inner_network_offset(skb) +
549 skb_inner_network_header_len(skb) +
550 inner_tcp_hdrlen(skb);
551 else
552 hdr_len = skb_transport_offset(skb) +
553 tcp_hdrlen(skb);
554
dade5e15 555 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
c0c050c5
MC
556 TX_BD_FLAGS_T_IPID |
557 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
558 length = skb_shinfo(skb)->gso_size;
559 txbd1->tx_bd_mss = cpu_to_le32(length);
560 length += hdr_len;
561 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
dade5e15 562 txbd1->tx_bd_hsize_lflags |=
c0c050c5
MC
563 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
564 txbd1->tx_bd_mss = 0;
565 }
566
567 length >>= 9;
2b3c6885
MC
568 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
569 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
570 skb->len);
571 i = 0;
572 goto tx_dma_error;
573 }
c0c050c5
MC
574 flags |= bnxt_lhint_arr[length];
575 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
576
577 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
ee5c7fb3
SP
578 txbd1->tx_bd_cfa_action =
579 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
c0c050c5
MC
580 for (i = 0; i < last_frag; i++) {
581 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
582
583 prod = NEXT_TX(prod);
584 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
585
586 len = skb_frag_size(frag);
587 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
588 DMA_TO_DEVICE);
589
590 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
591 goto tx_dma_error;
592
593 tx_buf = &txr->tx_buf_ring[prod];
594 dma_unmap_addr_set(tx_buf, mapping, mapping);
595
596 txbd->tx_bd_haddr = cpu_to_le64(mapping);
597
598 flags = len << TX_BD_LEN_SHIFT;
599 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
600 }
601
602 flags &= ~TX_BD_LEN;
603 txbd->tx_bd_len_flags_type =
604 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
605 TX_BD_FLAGS_PACKET_END);
606
607 netdev_tx_sent_queue(txq, skb->len);
608
83bb623c
PC
609 skb_tx_timestamp(skb);
610
c0c050c5
MC
611 /* Sync BD data before updating doorbell */
612 wmb();
613
614 prod = NEXT_TX(prod);
615 txr->tx_prod = prod;
616
6b16f9ee 617 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
697197e5 618 bnxt_db_write(bp, &txr->tx_db, prod);
c0c050c5
MC
619
620tx_done:
621
c0c050c5 622 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
6b16f9ee 623 if (netdev_xmit_more() && !tx_buf->is_push)
697197e5 624 bnxt_db_write(bp, &txr->tx_db, prod);
4d172f21 625
c0c050c5
MC
626 netif_tx_stop_queue(txq);
627
628 /* netif_tx_stop_queue() must be done before checking
629 * tx index in bnxt_tx_avail() below, because in
630 * bnxt_tx_int(), we update tx index before checking for
631 * netif_tx_queue_stopped().
632 */
633 smp_mb();
634 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
635 netif_tx_wake_queue(txq);
636 }
637 return NETDEV_TX_OK;
638
639tx_dma_error:
83bb623c
PC
640 if (BNXT_TX_PTP_IS_SET(lflags))
641 atomic_inc(&bp->ptp_cfg->tx_avail);
642
c0c050c5
MC
643 last_frag = i;
644
645 /* start back at beginning and unmap skb */
646 prod = txr->tx_prod;
647 tx_buf = &txr->tx_buf_ring[prod];
648 tx_buf->skb = NULL;
649 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
650 skb_headlen(skb), PCI_DMA_TODEVICE);
651 prod = NEXT_TX(prod);
652
653 /* unmap remaining mapped pages */
654 for (i = 0; i < last_frag; i++) {
655 prod = NEXT_TX(prod);
656 tx_buf = &txr->tx_buf_ring[prod];
657 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
658 skb_frag_size(&skb_shinfo(skb)->frags[i]),
659 PCI_DMA_TODEVICE);
660 }
661
662 dev_kfree_skb_any(skb);
663 return NETDEV_TX_OK;
664}
665
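/* Reclaim completed TX descriptors: unmap the DMA buffers, free the skbs
 * (or defer freeing for PTP-timestamped packets on P5 chips), and wake the
 * TX queue if it was stopped and enough descriptors are free again.
 */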
666static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
667{
b6ab4b01 668 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
a960dec9 669 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
c0c050c5
MC
670 u16 cons = txr->tx_cons;
671 struct pci_dev *pdev = bp->pdev;
672 int i;
673 unsigned int tx_bytes = 0;
674
675 for (i = 0; i < nr_pkts; i++) {
676 struct bnxt_sw_tx_bd *tx_buf;
83bb623c 677 bool compl_deferred = false;
c0c050c5
MC
678 struct sk_buff *skb;
679 int j, last;
680
681 tx_buf = &txr->tx_buf_ring[cons];
682 cons = NEXT_TX(cons);
683 skb = tx_buf->skb;
684 tx_buf->skb = NULL;
685
686 if (tx_buf->is_push) {
687 tx_buf->is_push = 0;
688 goto next_tx_int;
689 }
690
691 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
692 skb_headlen(skb), PCI_DMA_TODEVICE);
693 last = tx_buf->nr_frags;
694
695 for (j = 0; j < last; j++) {
696 cons = NEXT_TX(cons);
697 tx_buf = &txr->tx_buf_ring[cons];
698 dma_unmap_page(
699 &pdev->dev,
700 dma_unmap_addr(tx_buf, mapping),
701 skb_frag_size(&skb_shinfo(skb)->frags[j]),
702 PCI_DMA_TODEVICE);
703 }
83bb623c
PC
704 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
705 if (bp->flags & BNXT_FLAG_CHIP_P5) {
706 if (!bnxt_get_tx_ts_p5(bp, skb))
707 compl_deferred = true;
708 else
709 atomic_inc(&bp->ptp_cfg->tx_avail);
710 }
711 }
c0c050c5
MC
712
713next_tx_int:
714 cons = NEXT_TX(cons);
715
716 tx_bytes += skb->len;
83bb623c
PC
717 if (!compl_deferred)
718 dev_kfree_skb_any(skb);
c0c050c5
MC
719 }
720
721 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
722 txr->tx_cons = cons;
723
724 /* Need to make the tx_cons update visible to bnxt_start_xmit()
725 * before checking for netif_tx_queue_stopped(). Without the
726 * memory barrier, there is a small possibility that bnxt_start_xmit()
727 * will miss it and cause the queue to be stopped forever.
728 */
729 smp_mb();
730
731 if (unlikely(netif_tx_queue_stopped(txq)) &&
732 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
733 __netif_tx_lock(txq, smp_processor_id());
734 if (netif_tx_queue_stopped(txq) &&
735 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
736 txr->dev_state != BNXT_DEV_STATE_CLOSING)
737 netif_tx_wake_queue(txq);
738 __netif_tx_unlock(txq);
739 }
740}
741
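/* RX buffer allocation helpers: in page mode, buffers come from the ring's
 * page_pool; otherwise rx_buf_size bytes are kmalloc()ed.  Both paths map
 * the buffer for DMA with weak ordering before it is posted to the ring.
 */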
c61fb99c 742static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
322b87ca 743 struct bnxt_rx_ring_info *rxr,
c61fb99c
MC
744 gfp_t gfp)
745{
746 struct device *dev = &bp->pdev->dev;
747 struct page *page;
748
322b87ca 749 page = page_pool_dev_alloc_pages(rxr->page_pool);
c61fb99c
MC
750 if (!page)
751 return NULL;
752
c519fe9a
SN
753 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
754 DMA_ATTR_WEAK_ORDERING);
c61fb99c 755 if (dma_mapping_error(dev, *mapping)) {
322b87ca 756 page_pool_recycle_direct(rxr->page_pool, page);
c61fb99c
MC
757 return NULL;
758 }
759 *mapping += bp->rx_dma_offset;
760 return page;
761}
762
c0c050c5
MC
763static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
764 gfp_t gfp)
765{
766 u8 *data;
767 struct pci_dev *pdev = bp->pdev;
768
769 data = kmalloc(bp->rx_buf_size, gfp);
770 if (!data)
771 return NULL;
772
c519fe9a
SN
773 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
774 bp->rx_buf_use_size, bp->rx_dir,
775 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
776
777 if (dma_mapping_error(&pdev->dev, *mapping)) {
778 kfree(data);
779 data = NULL;
780 }
781 return data;
782}
783
38413406
MC
784int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
785 u16 prod, gfp_t gfp)
c0c050c5
MC
786{
787 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
788 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
c0c050c5
MC
789 dma_addr_t mapping;
790
c61fb99c 791 if (BNXT_RX_PAGE_MODE(bp)) {
322b87ca
AG
792 struct page *page =
793 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
c0c050c5 794
c61fb99c
MC
795 if (!page)
796 return -ENOMEM;
797
798 rx_buf->data = page;
799 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
800 } else {
801 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
802
803 if (!data)
804 return -ENOMEM;
805
806 rx_buf->data = data;
807 rx_buf->data_ptr = data + bp->rx_offset;
808 }
11cd119d 809 rx_buf->mapping = mapping;
c0c050c5
MC
810
811 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
c0c050c5
MC
812 return 0;
813}
814
c6d30e83 815void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
c0c050c5
MC
816{
817 u16 prod = rxr->rx_prod;
818 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
819 struct rx_bd *cons_bd, *prod_bd;
820
821 prod_rx_buf = &rxr->rx_buf_ring[prod];
822 cons_rx_buf = &rxr->rx_buf_ring[cons];
823
824 prod_rx_buf->data = data;
6bb19474 825 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
c0c050c5 826
11cd119d 827 prod_rx_buf->mapping = cons_rx_buf->mapping;
c0c050c5
MC
828
829 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
830 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
831
832 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
833}
834
835static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
836{
837 u16 next, max = rxr->rx_agg_bmap_size;
838
839 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
840 if (next >= max)
841 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
842 return next;
843}
844
845static inline int bnxt_alloc_rx_page(struct bnxt *bp,
846 struct bnxt_rx_ring_info *rxr,
847 u16 prod, gfp_t gfp)
848{
849 struct rx_bd *rxbd =
850 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
851 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
852 struct pci_dev *pdev = bp->pdev;
853 struct page *page;
854 dma_addr_t mapping;
855 u16 sw_prod = rxr->rx_sw_agg_prod;
89d0a06c 856 unsigned int offset = 0;
c0c050c5 857
89d0a06c
MC
858 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
859 page = rxr->rx_page;
860 if (!page) {
861 page = alloc_page(gfp);
862 if (!page)
863 return -ENOMEM;
864 rxr->rx_page = page;
865 rxr->rx_page_offset = 0;
866 }
867 offset = rxr->rx_page_offset;
868 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
869 if (rxr->rx_page_offset == PAGE_SIZE)
870 rxr->rx_page = NULL;
871 else
872 get_page(page);
873 } else {
874 page = alloc_page(gfp);
875 if (!page)
876 return -ENOMEM;
877 }
c0c050c5 878
c519fe9a
SN
879 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
880 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
881 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
882 if (dma_mapping_error(&pdev->dev, mapping)) {
883 __free_page(page);
884 return -EIO;
885 }
886
887 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
888 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
889
890 __set_bit(sw_prod, rxr->rx_agg_bmap);
891 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
892 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
893
894 rx_agg_buf->page = page;
89d0a06c 895 rx_agg_buf->offset = offset;
c0c050c5
MC
896 rx_agg_buf->mapping = mapping;
897 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
898 rxbd->rx_bd_opaque = sw_prod;
899 return 0;
900}
901
4a228a3a
MC
902static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
903 struct bnxt_cp_ring_info *cpr,
904 u16 cp_cons, u16 curr)
905{
906 struct rx_agg_cmp *agg;
907
908 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
909 agg = (struct rx_agg_cmp *)
910 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
911 return agg;
912}
913
bfcd8d79
MC
914static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
915 struct bnxt_rx_ring_info *rxr,
916 u16 agg_id, u16 curr)
917{
918 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
919
920 return &tpa_info->agg_arr[curr];
921}
922
4a228a3a
MC
923static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
924 u16 start, u32 agg_bufs, bool tpa)
c0c050c5 925{
e44758b7 926 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5 927 struct bnxt *bp = bnapi->bp;
b6ab4b01 928 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
929 u16 prod = rxr->rx_agg_prod;
930 u16 sw_prod = rxr->rx_sw_agg_prod;
bfcd8d79 931 bool p5_tpa = false;
c0c050c5
MC
932 u32 i;
933
bfcd8d79
MC
934 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
935 p5_tpa = true;
936
c0c050c5
MC
937 for (i = 0; i < agg_bufs; i++) {
938 u16 cons;
939 struct rx_agg_cmp *agg;
940 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
941 struct rx_bd *prod_bd;
942 struct page *page;
943
bfcd8d79
MC
944 if (p5_tpa)
945 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
946 else
947 agg = bnxt_get_agg(bp, cpr, idx, start + i);
c0c050c5
MC
948 cons = agg->rx_agg_cmp_opaque;
949 __clear_bit(cons, rxr->rx_agg_bmap);
950
951 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
952 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
953
954 __set_bit(sw_prod, rxr->rx_agg_bmap);
955 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
956 cons_rx_buf = &rxr->rx_agg_ring[cons];
957
958 /* It is possible for sw_prod to be equal to cons, so
959 * set cons_rx_buf->page to NULL first.
960 */
961 page = cons_rx_buf->page;
962 cons_rx_buf->page = NULL;
963 prod_rx_buf->page = page;
89d0a06c 964 prod_rx_buf->offset = cons_rx_buf->offset;
c0c050c5
MC
965
966 prod_rx_buf->mapping = cons_rx_buf->mapping;
967
968 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
969
970 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
971 prod_bd->rx_bd_opaque = sw_prod;
972
973 prod = NEXT_RX_AGG(prod);
974 sw_prod = NEXT_RX_AGG(sw_prod);
c0c050c5
MC
975 }
976 rxr->rx_agg_prod = prod;
977 rxr->rx_sw_agg_prod = sw_prod;
978}
979
c61fb99c
MC
980static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
981 struct bnxt_rx_ring_info *rxr,
982 u16 cons, void *data, u8 *data_ptr,
983 dma_addr_t dma_addr,
984 unsigned int offset_and_len)
985{
986 unsigned int payload = offset_and_len >> 16;
987 unsigned int len = offset_and_len & 0xffff;
d7840976 988 skb_frag_t *frag;
c61fb99c
MC
989 struct page *page = data;
990 u16 prod = rxr->rx_prod;
991 struct sk_buff *skb;
992 int off, err;
993
994 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
995 if (unlikely(err)) {
996 bnxt_reuse_rx_data(rxr, cons, data);
997 return NULL;
998 }
999 dma_addr -= bp->rx_dma_offset;
c519fe9a
SN
1000 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
1001 DMA_ATTR_WEAK_ORDERING);
3071c517 1002 page_pool_release_page(rxr->page_pool, page);
c61fb99c
MC
1003
1004 if (unlikely(!payload))
c43f1255 1005 payload = eth_get_headlen(bp->dev, data_ptr, len);
c61fb99c
MC
1006
1007 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1008 if (!skb) {
1009 __free_page(page);
1010 return NULL;
1011 }
1012
1013 off = (void *)data_ptr - page_address(page);
1014 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
1015 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1016 payload + NET_IP_ALIGN);
1017
1018 frag = &skb_shinfo(skb)->frags[0];
1019 skb_frag_size_sub(frag, payload);
b54c9d5b 1020 skb_frag_off_add(frag, payload);
c61fb99c
MC
1021 skb->data_len -= payload;
1022 skb->tail += payload;
1023
1024 return skb;
1025}
1026
c0c050c5
MC
1027static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1028 struct bnxt_rx_ring_info *rxr, u16 cons,
6bb19474
MC
1029 void *data, u8 *data_ptr,
1030 dma_addr_t dma_addr,
1031 unsigned int offset_and_len)
c0c050c5 1032{
6bb19474 1033 u16 prod = rxr->rx_prod;
c0c050c5 1034 struct sk_buff *skb;
6bb19474 1035 int err;
c0c050c5
MC
1036
1037 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1038 if (unlikely(err)) {
1039 bnxt_reuse_rx_data(rxr, cons, data);
1040 return NULL;
1041 }
1042
1043 skb = build_skb(data, 0);
c519fe9a
SN
1044 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1045 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1046 if (!skb) {
1047 kfree(data);
1048 return NULL;
1049 }
1050
b3dba77c 1051 skb_reserve(skb, bp->rx_offset);
6bb19474 1052 skb_put(skb, offset_and_len & 0xffff);
c0c050c5
MC
1053 return skb;
1054}
1055
e44758b7
MC
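/* Attach the aggregation buffers of a jumbo or TPA completion to the skb as
 * page fragments, replenishing the aggregation ring as each page is consumed.
 */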
1056static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1057 struct bnxt_cp_ring_info *cpr,
4a228a3a
MC
1058 struct sk_buff *skb, u16 idx,
1059 u32 agg_bufs, bool tpa)
c0c050c5 1060{
e44758b7 1061 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5 1062 struct pci_dev *pdev = bp->pdev;
b6ab4b01 1063 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 1064 u16 prod = rxr->rx_agg_prod;
bfcd8d79 1065 bool p5_tpa = false;
c0c050c5
MC
1066 u32 i;
1067
bfcd8d79
MC
1068 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1069 p5_tpa = true;
1070
c0c050c5
MC
1071 for (i = 0; i < agg_bufs; i++) {
1072 u16 cons, frag_len;
1073 struct rx_agg_cmp *agg;
1074 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1075 struct page *page;
1076 dma_addr_t mapping;
1077
bfcd8d79
MC
1078 if (p5_tpa)
1079 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1080 else
1081 agg = bnxt_get_agg(bp, cpr, idx, i);
c0c050c5
MC
1082 cons = agg->rx_agg_cmp_opaque;
1083 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1084 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1085
1086 cons_rx_buf = &rxr->rx_agg_ring[cons];
89d0a06c
MC
1087 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1088 cons_rx_buf->offset, frag_len);
c0c050c5
MC
1089 __clear_bit(cons, rxr->rx_agg_bmap);
1090
1091 /* It is possible for bnxt_alloc_rx_page() to allocate
1092 * a sw_prod index that equals the cons index, so we
1093 * need to clear the cons entry now.
1094 */
11cd119d 1095 mapping = cons_rx_buf->mapping;
c0c050c5
MC
1096 page = cons_rx_buf->page;
1097 cons_rx_buf->page = NULL;
1098
1099 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1100 struct skb_shared_info *shinfo;
1101 unsigned int nr_frags;
1102
1103 shinfo = skb_shinfo(skb);
1104 nr_frags = --shinfo->nr_frags;
1105 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1106
1107 dev_kfree_skb(skb);
1108
1109 cons_rx_buf->page = page;
1110
1111 /* Update prod since possibly some pages have been
1112 * allocated already.
1113 */
1114 rxr->rx_agg_prod = prod;
4a228a3a 1115 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
c0c050c5
MC
1116 return NULL;
1117 }
1118
c519fe9a
SN
1119 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1120 PCI_DMA_FROMDEVICE,
1121 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1122
1123 skb->data_len += frag_len;
1124 skb->len += frag_len;
1125 skb->truesize += PAGE_SIZE;
1126
1127 prod = NEXT_RX_AGG(prod);
c0c050c5
MC
1128 }
1129 rxr->rx_agg_prod = prod;
1130 return skb;
1131}
1132
1133static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1134 u8 agg_bufs, u32 *raw_cons)
1135{
1136 u16 last;
1137 struct rx_agg_cmp *agg;
1138
1139 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1140 last = RING_CMP(*raw_cons);
1141 agg = (struct rx_agg_cmp *)
1142 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1143 return RX_AGG_CMP_VALID(agg, *raw_cons);
1144}
1145
1146static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1147 unsigned int len,
1148 dma_addr_t mapping)
1149{
1150 struct bnxt *bp = bnapi->bp;
1151 struct pci_dev *pdev = bp->pdev;
1152 struct sk_buff *skb;
1153
1154 skb = napi_alloc_skb(&bnapi->napi, len);
1155 if (!skb)
1156 return NULL;
1157
745fc05c
MC
1158 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1159 bp->rx_dir);
c0c050c5 1160
6bb19474
MC
1161 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1162 len + NET_IP_ALIGN);
c0c050c5 1163
745fc05c
MC
1164 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1165 bp->rx_dir);
c0c050c5
MC
1166
1167 skb_put(skb, len);
1168 return skb;
1169}
1170
e44758b7 1171static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
fa7e2812
MC
1172 u32 *raw_cons, void *cmp)
1173{
fa7e2812
MC
1174 struct rx_cmp *rxcmp = cmp;
1175 u32 tmp_raw_cons = *raw_cons;
1176 u8 cmp_type, agg_bufs = 0;
1177
1178 cmp_type = RX_CMP_TYPE(rxcmp);
1179
1180 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1181 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1182 RX_CMP_AGG_BUFS) >>
1183 RX_CMP_AGG_BUFS_SHIFT;
1184 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1185 struct rx_tpa_end_cmp *tpa_end = cmp;
1186
bfcd8d79
MC
1187 if (bp->flags & BNXT_FLAG_CHIP_P5)
1188 return 0;
1189
4a228a3a 1190 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
fa7e2812
MC
1191 }
1192
1193 if (agg_bufs) {
1194 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1195 return -EBUSY;
1196 }
1197 *raw_cons = tmp_raw_cons;
1198 return 0;
1199}
1200
230d1f0d
MC
1201static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1202{
b148bb23
MC
1203 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1204 return;
1205
230d1f0d
MC
1206 if (BNXT_PF(bp))
1207 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1208 else
1209 schedule_delayed_work(&bp->fw_reset_task, delay);
1210}
1211
c213eae8
MC
1212static void bnxt_queue_sp_work(struct bnxt *bp)
1213{
1214 if (BNXT_PF(bp))
1215 queue_work(bnxt_pf_wq, &bp->sp_task);
1216 else
1217 schedule_work(&bp->sp_task);
1218}
1219
fa7e2812
MC
1220static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1221{
1222 if (!rxr->bnapi->in_reset) {
1223 rxr->bnapi->in_reset = true;
8fbf58e1
MC
1224 if (bp->flags & BNXT_FLAG_CHIP_P5)
1225 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1226 else
1227 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
c213eae8 1228 bnxt_queue_sp_work(bp);
fa7e2812
MC
1229 }
1230 rxr->rx_next_cons = 0xffff;
1231}
1232
ec4d8e7c
MC
1233static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1234{
1235 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1236 u16 idx = agg_id & MAX_TPA_P5_MASK;
1237
1238 if (test_bit(idx, map->agg_idx_bmap))
1239 idx = find_first_zero_bit(map->agg_idx_bmap,
1240 BNXT_AGG_IDX_BMAP_SIZE);
1241 __set_bit(idx, map->agg_idx_bmap);
1242 map->agg_id_tbl[agg_id] = idx;
1243 return idx;
1244}
1245
1246static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1247{
1248 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1249
1250 __clear_bit(idx, map->agg_idx_bmap);
1251}
1252
1253static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1254{
1255 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1256
1257 return map->agg_id_tbl[agg_id];
1258}
1259
c0c050c5
MC
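/* TPA (hardware aggregation) start: save the buffer and metadata of the
 * first packet of the aggregation in rx_tpa[agg_id] so the skb can be
 * completed when the TPA end completion arrives.
 */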
1260static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1261 struct rx_tpa_start_cmp *tpa_start,
1262 struct rx_tpa_start_cmp_ext *tpa_start1)
1263{
c0c050c5 1264 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
bfcd8d79
MC
1265 struct bnxt_tpa_info *tpa_info;
1266 u16 cons, prod, agg_id;
c0c050c5
MC
1267 struct rx_bd *prod_bd;
1268 dma_addr_t mapping;
1269
ec4d8e7c 1270 if (bp->flags & BNXT_FLAG_CHIP_P5) {
bfcd8d79 1271 agg_id = TPA_START_AGG_ID_P5(tpa_start);
ec4d8e7c
MC
1272 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1273 } else {
bfcd8d79 1274 agg_id = TPA_START_AGG_ID(tpa_start);
ec4d8e7c 1275 }
c0c050c5
MC
1276 cons = tpa_start->rx_tpa_start_cmp_opaque;
1277 prod = rxr->rx_prod;
1278 cons_rx_buf = &rxr->rx_buf_ring[cons];
1279 prod_rx_buf = &rxr->rx_buf_ring[prod];
1280 tpa_info = &rxr->rx_tpa[agg_id];
1281
bfcd8d79
MC
1282 if (unlikely(cons != rxr->rx_next_cons ||
1283 TPA_START_ERROR(tpa_start))) {
1284 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1285 cons, rxr->rx_next_cons,
1286 TPA_START_ERROR_CODE(tpa_start1));
fa7e2812
MC
1287 bnxt_sched_reset(bp, rxr);
1288 return;
1289 }
ee5c7fb3
SP
1290 /* Store cfa_code in tpa_info to use in tpa_end
1291 * completion processing.
1292 */
1293 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
c0c050c5 1294 prod_rx_buf->data = tpa_info->data;
6bb19474 1295 prod_rx_buf->data_ptr = tpa_info->data_ptr;
c0c050c5
MC
1296
1297 mapping = tpa_info->mapping;
11cd119d 1298 prod_rx_buf->mapping = mapping;
c0c050c5
MC
1299
1300 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1301
1302 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1303
1304 tpa_info->data = cons_rx_buf->data;
6bb19474 1305 tpa_info->data_ptr = cons_rx_buf->data_ptr;
c0c050c5 1306 cons_rx_buf->data = NULL;
11cd119d 1307 tpa_info->mapping = cons_rx_buf->mapping;
c0c050c5
MC
1308
1309 tpa_info->len =
1310 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1311 RX_TPA_START_CMP_LEN_SHIFT;
1312 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1313 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1314
1315 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1316 tpa_info->gso_type = SKB_GSO_TCPV4;
1317 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
50f011b6 1318 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
c0c050c5
MC
1319 tpa_info->gso_type = SKB_GSO_TCPV6;
1320 tpa_info->rss_hash =
1321 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1322 } else {
1323 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1324 tpa_info->gso_type = 0;
871127e6 1325 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
c0c050c5
MC
1326 }
1327 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1328 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
94758f8d 1329 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
bfcd8d79 1330 tpa_info->agg_count = 0;
c0c050c5
MC
1331
1332 rxr->rx_prod = NEXT_RX(prod);
1333 cons = NEXT_RX(cons);
376a5b86 1334 rxr->rx_next_cons = NEXT_RX(cons);
c0c050c5
MC
1335 cons_rx_buf = &rxr->rx_buf_ring[cons];
1336
1337 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1338 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1339 cons_rx_buf->data = NULL;
1340}
1341
4a228a3a 1342static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
c0c050c5
MC
1343{
1344 if (agg_bufs)
4a228a3a 1345 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
c0c050c5
MC
1346}
1347
bee5a188
MC
1348#ifdef CONFIG_INET
1349static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1350{
1351 struct udphdr *uh = NULL;
1352
1353 if (ip_proto == htons(ETH_P_IP)) {
1354 struct iphdr *iph = (struct iphdr *)skb->data;
1355
1356 if (iph->protocol == IPPROTO_UDP)
1357 uh = (struct udphdr *)(iph + 1);
1358 } else {
1359 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1360
1361 if (iph->nexthdr == IPPROTO_UDP)
1362 uh = (struct udphdr *)(iph + 1);
1363 }
1364 if (uh) {
1365 if (uh->check)
1366 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1367 else
1368 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1369 }
1370}
1371#endif
1372
94758f8d
MC
1373static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1374 int payload_off, int tcp_ts,
1375 struct sk_buff *skb)
1376{
1377#ifdef CONFIG_INET
1378 struct tcphdr *th;
1379 int len, nw_off;
1380 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1381 u32 hdr_info = tpa_info->hdr_info;
1382 bool loopback = false;
1383
1384 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1385 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1386 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1387
1388 /* If the packet is an internal loopback packet, the offsets will
1389 * have an extra 4 bytes.
1390 */
1391 if (inner_mac_off == 4) {
1392 loopback = true;
1393 } else if (inner_mac_off > 4) {
1394 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1395 ETH_HLEN - 2));
1396
 1397 /* We only support inner IPv4/IPv6. If we don't see the
1398 * correct protocol ID, it must be a loopback packet where
1399 * the offsets are off by 4.
1400 */
09a7636a 1401 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
94758f8d
MC
1402 loopback = true;
1403 }
1404 if (loopback) {
1405 /* internal loopback packet, subtract all offsets by 4 */
1406 inner_ip_off -= 4;
1407 inner_mac_off -= 4;
1408 outer_ip_off -= 4;
1409 }
1410
1411 nw_off = inner_ip_off - ETH_HLEN;
1412 skb_set_network_header(skb, nw_off);
1413 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1414 struct ipv6hdr *iph = ipv6_hdr(skb);
1415
1416 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1417 len = skb->len - skb_transport_offset(skb);
1418 th = tcp_hdr(skb);
1419 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1420 } else {
1421 struct iphdr *iph = ip_hdr(skb);
1422
1423 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1424 len = skb->len - skb_transport_offset(skb);
1425 th = tcp_hdr(skb);
1426 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1427 }
1428
1429 if (inner_mac_off) { /* tunnel */
94758f8d
MC
1430 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1431 ETH_HLEN - 2));
1432
bee5a188 1433 bnxt_gro_tunnel(skb, proto);
94758f8d
MC
1434 }
1435#endif
1436 return skb;
1437}
1438
67912c36
MC
1439static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1440 int payload_off, int tcp_ts,
1441 struct sk_buff *skb)
1442{
1443#ifdef CONFIG_INET
1444 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1445 u32 hdr_info = tpa_info->hdr_info;
1446 int iphdr_len, nw_off;
1447
1448 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1449 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1450 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1451
1452 nw_off = inner_ip_off - ETH_HLEN;
1453 skb_set_network_header(skb, nw_off);
1454 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1455 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1456 skb_set_transport_header(skb, nw_off + iphdr_len);
1457
1458 if (inner_mac_off) { /* tunnel */
1459 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1460 ETH_HLEN - 2));
1461
1462 bnxt_gro_tunnel(skb, proto);
1463 }
1464#endif
1465 return skb;
1466}
1467
c0c050c5
MC
1468#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1469#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1470
309369c9
MC
1471static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1472 int payload_off, int tcp_ts,
c0c050c5
MC
1473 struct sk_buff *skb)
1474{
d1611c3a 1475#ifdef CONFIG_INET
c0c050c5 1476 struct tcphdr *th;
719ca811 1477 int len, nw_off, tcp_opt_len = 0;
27e24189 1478
309369c9 1479 if (tcp_ts)
c0c050c5
MC
1480 tcp_opt_len = 12;
1481
c0c050c5
MC
1482 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1483 struct iphdr *iph;
1484
1485 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1486 ETH_HLEN;
1487 skb_set_network_header(skb, nw_off);
1488 iph = ip_hdr(skb);
1489 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1490 len = skb->len - skb_transport_offset(skb);
1491 th = tcp_hdr(skb);
1492 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1493 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1494 struct ipv6hdr *iph;
1495
1496 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1497 ETH_HLEN;
1498 skb_set_network_header(skb, nw_off);
1499 iph = ipv6_hdr(skb);
1500 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1501 len = skb->len - skb_transport_offset(skb);
1502 th = tcp_hdr(skb);
1503 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1504 } else {
1505 dev_kfree_skb_any(skb);
1506 return NULL;
1507 }
c0c050c5 1508
bee5a188
MC
1509 if (nw_off) /* tunnel */
1510 bnxt_gro_tunnel(skb, skb->protocol);
c0c050c5
MC
1511#endif
1512 return skb;
1513}
1514
309369c9
MC
1515static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1516 struct bnxt_tpa_info *tpa_info,
1517 struct rx_tpa_end_cmp *tpa_end,
1518 struct rx_tpa_end_cmp_ext *tpa_end1,
1519 struct sk_buff *skb)
1520{
1521#ifdef CONFIG_INET
1522 int payload_off;
1523 u16 segs;
1524
1525 segs = TPA_END_TPA_SEGS(tpa_end);
1526 if (segs == 1)
1527 return skb;
1528
1529 NAPI_GRO_CB(skb)->count = segs;
1530 skb_shinfo(skb)->gso_size =
1531 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1532 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
bfcd8d79
MC
1533 if (bp->flags & BNXT_FLAG_CHIP_P5)
1534 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1535 else
1536 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
309369c9 1537 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
5910906c
MC
1538 if (likely(skb))
1539 tcp_gro_complete(skb);
309369c9
MC
1540#endif
1541 return skb;
1542}
1543
ee5c7fb3
SP
1544/* Given the cfa_code of a received packet determine which
1545 * netdev (vf-rep or PF) the packet is destined to.
1546 */
1547static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1548{
1549 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1550
 1551 /* if vf-rep dev is NULL, the packet must belong to the PF */
1552 return dev ? dev : bp->dev;
1553}
1554
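/* TPA end: build the skb for a completed aggregation, attach any aggregation
 * buffers, and run GRO completion if the hardware flagged the flow for GRO.
 */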
c0c050c5 1555static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
e44758b7 1556 struct bnxt_cp_ring_info *cpr,
c0c050c5
MC
1557 u32 *raw_cons,
1558 struct rx_tpa_end_cmp *tpa_end,
1559 struct rx_tpa_end_cmp_ext *tpa_end1,
4e5dbbda 1560 u8 *event)
c0c050c5 1561{
e44758b7 1562 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1563 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6bb19474 1564 u8 *data_ptr, agg_bufs;
c0c050c5
MC
1565 unsigned int len;
1566 struct bnxt_tpa_info *tpa_info;
1567 dma_addr_t mapping;
1568 struct sk_buff *skb;
bfcd8d79 1569 u16 idx = 0, agg_id;
6bb19474 1570 void *data;
bfcd8d79 1571 bool gro;
c0c050c5 1572
fa7e2812 1573 if (unlikely(bnapi->in_reset)) {
e44758b7 1574 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
fa7e2812
MC
1575
1576 if (rc < 0)
1577 return ERR_PTR(-EBUSY);
1578 return NULL;
1579 }
1580
bfcd8d79
MC
1581 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1582 agg_id = TPA_END_AGG_ID_P5(tpa_end);
ec4d8e7c 1583 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
bfcd8d79
MC
1584 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1585 tpa_info = &rxr->rx_tpa[agg_id];
1586 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1587 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1588 agg_bufs, tpa_info->agg_count);
1589 agg_bufs = tpa_info->agg_count;
1590 }
1591 tpa_info->agg_count = 0;
1592 *event |= BNXT_AGG_EVENT;
ec4d8e7c 1593 bnxt_free_agg_idx(rxr, agg_id);
bfcd8d79
MC
1594 idx = agg_id;
1595 gro = !!(bp->flags & BNXT_FLAG_GRO);
1596 } else {
1597 agg_id = TPA_END_AGG_ID(tpa_end);
1598 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1599 tpa_info = &rxr->rx_tpa[agg_id];
1600 idx = RING_CMP(*raw_cons);
1601 if (agg_bufs) {
1602 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1603 return ERR_PTR(-EBUSY);
1604
1605 *event |= BNXT_AGG_EVENT;
1606 idx = NEXT_CMP(idx);
1607 }
1608 gro = !!TPA_END_GRO(tpa_end);
1609 }
c0c050c5 1610 data = tpa_info->data;
6bb19474
MC
1611 data_ptr = tpa_info->data_ptr;
1612 prefetch(data_ptr);
c0c050c5
MC
1613 len = tpa_info->len;
1614 mapping = tpa_info->mapping;
1615
69c149e2 1616 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
4a228a3a 1617 bnxt_abort_tpa(cpr, idx, agg_bufs);
69c149e2
MC
1618 if (agg_bufs > MAX_SKB_FRAGS)
1619 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1620 agg_bufs, (int)MAX_SKB_FRAGS);
c0c050c5
MC
1621 return NULL;
1622 }
1623
1624 if (len <= bp->rx_copy_thresh) {
6bb19474 1625 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
c0c050c5 1626 if (!skb) {
4a228a3a 1627 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1628 return NULL;
1629 }
1630 } else {
1631 u8 *new_data;
1632 dma_addr_t new_mapping;
1633
1634 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1635 if (!new_data) {
4a228a3a 1636 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1637 return NULL;
1638 }
1639
1640 tpa_info->data = new_data;
b3dba77c 1641 tpa_info->data_ptr = new_data + bp->rx_offset;
c0c050c5
MC
1642 tpa_info->mapping = new_mapping;
1643
1644 skb = build_skb(data, 0);
c519fe9a
SN
1645 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1646 bp->rx_buf_use_size, bp->rx_dir,
1647 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1648
1649 if (!skb) {
1650 kfree(data);
4a228a3a 1651 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1652 return NULL;
1653 }
b3dba77c 1654 skb_reserve(skb, bp->rx_offset);
c0c050c5
MC
1655 skb_put(skb, len);
1656 }
1657
1658 if (agg_bufs) {
4a228a3a 1659 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
c0c050c5
MC
1660 if (!skb) {
1661 /* Page reuse already handled by bnxt_rx_pages(). */
1662 return NULL;
1663 }
1664 }
ee5c7fb3
SP
1665
1666 skb->protocol =
1667 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
c0c050c5
MC
1668
1669 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1670 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1671
8852ddb4 1672 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
a196e96b 1673 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
96bdd4b9
MC
1674 __be16 vlan_proto = htons(tpa_info->metadata >>
1675 RX_CMP_FLAGS2_METADATA_TPID_SFT);
ed7bc602 1676 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
c0c050c5 1677
96bdd4b9
MC
1678 if (eth_type_vlan(vlan_proto)) {
1679 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1680 } else {
1681 dev_kfree_skb(skb);
1682 return NULL;
1683 }
c0c050c5
MC
1684 }
1685
1686 skb_checksum_none_assert(skb);
1687 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1688 skb->ip_summed = CHECKSUM_UNNECESSARY;
1689 skb->csum_level =
1690 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1691 }
1692
bfcd8d79 1693 if (gro)
309369c9 1694 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
c0c050c5
MC
1695
1696 return skb;
1697}
1698
8fe88ce7
MC
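/* Stash one TPA aggregation completion in the session's agg_arr; the
 * buffered entries are consumed when the TPA end completion for the same
 * aggregation ID arrives in bnxt_tpa_end().
 */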
1699static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1700 struct rx_agg_cmp *rx_agg)
1701{
1702 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1703 struct bnxt_tpa_info *tpa_info;
1704
ec4d8e7c 1705 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
8fe88ce7
MC
1706 tpa_info = &rxr->rx_tpa[agg_id];
1707 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1708 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1709}
1710
ee5c7fb3
SP
1711static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1712 struct sk_buff *skb)
1713{
1714 if (skb->dev != bp->dev) {
1715 /* this packet belongs to a vf-rep */
1716 bnxt_vf_rep_rx(bp, skb);
1717 return;
1718 }
1719 skb_record_rx_queue(skb, bnapi->index);
1720 napi_gro_receive(&bnapi->napi, skb);
1721}
1722
c0c050c5
MC
1723/* returns the following:
1724 * 1 - 1 packet successfully received
1725 * 0 - successful TPA_START, packet not completed yet
1726 * -EBUSY - completion ring does not have all the agg buffers yet
1727 * -ENOMEM - packet aborted due to out of memory
1728 * -EIO - packet aborted due to hw error indicated in BD
1729 */
e44758b7
MC
1730static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1731 u32 *raw_cons, u8 *event)
c0c050c5 1732{
e44758b7 1733 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1734 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1735 struct net_device *dev = bp->dev;
1736 struct rx_cmp *rxcmp;
1737 struct rx_cmp_ext *rxcmp1;
1738 u32 tmp_raw_cons = *raw_cons;
ee5c7fb3 1739 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
c0c050c5
MC
1740 struct bnxt_sw_rx_bd *rx_buf;
1741 unsigned int len;
6bb19474 1742 u8 *data_ptr, agg_bufs, cmp_type;
c0c050c5
MC
1743 dma_addr_t dma_addr;
1744 struct sk_buff *skb;
7f5515d1 1745 u32 flags, misc;
6bb19474 1746 void *data;
c0c050c5
MC
1747 int rc = 0;
1748
1749 rxcmp = (struct rx_cmp *)
1750 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1751
8fe88ce7
MC
1752 cmp_type = RX_CMP_TYPE(rxcmp);
1753
1754 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1755 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1756 goto next_rx_no_prod_no_len;
1757 }
1758
c0c050c5
MC
1759 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1760 cp_cons = RING_CMP(tmp_raw_cons);
1761 rxcmp1 = (struct rx_cmp_ext *)
1762 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1763
1764 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1765 return -EBUSY;
1766
c0c050c5
MC
1767 prod = rxr->rx_prod;
1768
1769 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1770 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1771 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1772
4e5dbbda 1773 *event |= BNXT_RX_EVENT;
e7e70fa6 1774 goto next_rx_no_prod_no_len;
c0c050c5
MC
1775
1776 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
e44758b7 1777 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
c0c050c5 1778 (struct rx_tpa_end_cmp *)rxcmp,
4e5dbbda 1779 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
c0c050c5 1780
1fac4b2f 1781 if (IS_ERR(skb))
c0c050c5
MC
1782 return -EBUSY;
1783
1784 rc = -ENOMEM;
1785 if (likely(skb)) {
ee5c7fb3 1786 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1787 rc = 1;
1788 }
4e5dbbda 1789 *event |= BNXT_RX_EVENT;
e7e70fa6 1790 goto next_rx_no_prod_no_len;
c0c050c5
MC
1791 }
1792
1793 cons = rxcmp->rx_cmp_opaque;
fa7e2812 1794 if (unlikely(cons != rxr->rx_next_cons)) {
bbd6f0a9 1795 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
fa7e2812 1796
1b5c8b63
MC
1797 /* 0xffff is forced error, don't print it */
1798 if (rxr->rx_next_cons != 0xffff)
1799 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1800 cons, rxr->rx_next_cons);
fa7e2812 1801 bnxt_sched_reset(bp, rxr);
bbd6f0a9
MC
1802 if (rc1)
1803 return rc1;
1804 goto next_rx_no_prod_no_len;
fa7e2812 1805 }
a1b0e4e6
MC
1806 rx_buf = &rxr->rx_buf_ring[cons];
1807 data = rx_buf->data;
1808 data_ptr = rx_buf->data_ptr;
6bb19474 1809 prefetch(data_ptr);
c0c050c5 1810
c61fb99c
MC
1811 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1812 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
c0c050c5
MC
1813
1814 if (agg_bufs) {
1815 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1816 return -EBUSY;
1817
1818 cp_cons = NEXT_CMP(cp_cons);
4e5dbbda 1819 *event |= BNXT_AGG_EVENT;
c0c050c5 1820 }
4e5dbbda 1821 *event |= BNXT_RX_EVENT;
c0c050c5
MC
1822
1823 rx_buf->data = NULL;
1824 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
8e44e96c
MC
1825 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1826
c0c050c5
MC
1827 bnxt_reuse_rx_data(rxr, cons, data);
1828 if (agg_bufs)
4a228a3a
MC
1829 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1830 false);
c0c050c5
MC
1831
1832 rc = -EIO;
8e44e96c 1833 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
9d8b5f05 1834 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
8d4bd96b
MC
1835 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1836 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
8fbf58e1
MC
1837 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1838 rx_err);
19b3751f
MC
1839 bnxt_sched_reset(bp, rxr);
1840 }
8e44e96c 1841 }
0b397b17 1842 goto next_rx_no_len;
c0c050c5
MC
1843 }
1844
7f5515d1
PC
1845 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1846 len = flags >> RX_CMP_LEN_SHIFT;
11cd119d 1847 dma_addr = rx_buf->mapping;
c0c050c5 1848
c6d30e83
MC
1849 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1850 rc = 1;
1851 goto next_rx;
1852 }
1853
c0c050c5 1854 if (len <= bp->rx_copy_thresh) {
6bb19474 1855 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
c0c050c5
MC
1856 bnxt_reuse_rx_data(rxr, cons, data);
1857 if (!skb) {
296d5b54 1858 if (agg_bufs)
4a228a3a
MC
1859 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1860 agg_bufs, false);
c0c050c5
MC
1861 rc = -ENOMEM;
1862 goto next_rx;
1863 }
1864 } else {
c61fb99c
MC
1865 u32 payload;
1866
c6d30e83
MC
1867 if (rx_buf->data_ptr == data_ptr)
1868 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1869 else
1870 payload = 0;
6bb19474 1871 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
c61fb99c 1872 payload | len);
c0c050c5
MC
1873 if (!skb) {
1874 rc = -ENOMEM;
1875 goto next_rx;
1876 }
1877 }
1878
1879 if (agg_bufs) {
4a228a3a 1880 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
c0c050c5
MC
1881 if (!skb) {
1882 rc = -ENOMEM;
1883 goto next_rx;
1884 }
1885 }
1886
1887 if (RX_CMP_HASH_VALID(rxcmp)) {
1888 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1889 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1890
1891 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1892 if (hash_type != 1 && hash_type != 3)
1893 type = PKT_HASH_TYPE_L3;
1894 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1895 }
1896
ee5c7fb3
SP
1897 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1898 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
c0c050c5 1899
8852ddb4
MC
1900 if ((rxcmp1->rx_cmp_flags2 &
1901 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
a196e96b 1902 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
c0c050c5 1903 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
ed7bc602 1904 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
96bdd4b9
MC
1905 __be16 vlan_proto = htons(meta_data >>
1906 RX_CMP_FLAGS2_METADATA_TPID_SFT);
c0c050c5 1907
96bdd4b9
MC
1908 if (eth_type_vlan(vlan_proto)) {
1909 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1910 } else {
1911 dev_kfree_skb(skb);
1912 goto next_rx;
1913 }
c0c050c5
MC
1914 }
1915
1916 skb_checksum_none_assert(skb);
1917 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1918 if (dev->features & NETIF_F_RXCSUM) {
1919 skb->ip_summed = CHECKSUM_UNNECESSARY;
1920 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1921 }
1922 } else {
665e350d
SB
1923 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1924 if (dev->features & NETIF_F_RXCSUM)
9d8b5f05 1925 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
665e350d 1926 }
c0c050c5
MC
1927 }
1928
7f5515d1
PC
1929 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1930 RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1931 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1932 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1933 u64 ns, ts;
1934
1935 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1936 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1937
1938 spin_lock_bh(&ptp->ptp_lock);
1939 ns = timecounter_cyc2time(&ptp->tc, ts);
1940 spin_unlock_bh(&ptp->ptp_lock);
1941 memset(skb_hwtstamps(skb), 0,
1942 sizeof(*skb_hwtstamps(skb)));
1943 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1944 }
1945 }
1946 }
ee5c7fb3 1947 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1948 rc = 1;
1949
1950next_rx:
6a8788f2
AG
1951 cpr->rx_packets += 1;
1952 cpr->rx_bytes += len;
e7e70fa6 1953
0b397b17
MC
1954next_rx_no_len:
1955 rxr->rx_prod = NEXT_RX(prod);
1956 rxr->rx_next_cons = NEXT_RX(cons);
1957
e7e70fa6 1958next_rx_no_prod_no_len:
c0c050c5
MC
1959 *raw_cons = tmp_raw_cons;
1960
1961 return rc;
1962}
1963
2270bc5d
MC
1964/* In netpoll mode, if we are using a combined completion ring, we need to
1965 * discard the rx packets and recycle the buffers.
1966 */
e44758b7
MC
1967static int bnxt_force_rx_discard(struct bnxt *bp,
1968 struct bnxt_cp_ring_info *cpr,
2270bc5d
MC
1969 u32 *raw_cons, u8 *event)
1970{
2270bc5d
MC
1971 u32 tmp_raw_cons = *raw_cons;
1972 struct rx_cmp_ext *rxcmp1;
1973 struct rx_cmp *rxcmp;
1974 u16 cp_cons;
1975 u8 cmp_type;
1976
1977 cp_cons = RING_CMP(tmp_raw_cons);
1978 rxcmp = (struct rx_cmp *)
1979 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1980
1981 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1982 cp_cons = RING_CMP(tmp_raw_cons);
1983 rxcmp1 = (struct rx_cmp_ext *)
1984 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1985
1986 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1987 return -EBUSY;
1988
1989 cmp_type = RX_CMP_TYPE(rxcmp);
1990 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1991 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1992 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1993 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1994 struct rx_tpa_end_cmp_ext *tpa_end1;
1995
1996 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1997 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1998 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1999 }
e44758b7 2000 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
2270bc5d
MC
2001}
2002
7e914027
MC
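/* Read one firmware health/recovery register.  The register descriptor
 * encodes where the value lives: PCI config space, GRC (remapped into
 * BAR0), or a direct BAR0/BAR1 offset.
 */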
2003u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2004{
2005 struct bnxt_fw_health *fw_health = bp->fw_health;
2006 u32 reg = fw_health->regs[reg_idx];
2007 u32 reg_type, reg_off, val = 0;
2008
2009 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2010 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2011 switch (reg_type) {
2012 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2013 pci_read_config_dword(bp->pdev, reg_off, &val);
2014 break;
2015 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2016 reg_off = fw_health->mapped_regs[reg_idx];
df561f66 2017 fallthrough;
7e914027
MC
2018 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2019 val = readl(bp->bar0 + reg_off);
2020 break;
2021 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2022 val = readl(bp->bar1 + reg_off);
2023 break;
2024 }
2025 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2026 val &= fw_health->fw_reset_inprog_reg_mask;
2027 return val;
2028}
2029
8d4bd96b
MC
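/* Map a firmware aggregation ring ID back to its ring group index so the
 * ring monitor async event handler can locate the RX ring to reset.
 */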
2030static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2031{
2032 int i;
2033
2034 for (i = 0; i < bp->rx_nr_rings; i++) {
2035 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2036 struct bnxt_ring_grp_info *grp_info;
2037
2038 grp_info = &bp->grp_info[grp_idx];
2039 if (grp_info->agg_fw_ring_id == ring_id)
2040 return grp_idx;
2041 }
2042 return INVALID_HW_RING_ID;
2043}
2044
4bb13abf 2045#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
2046 ((data) & \
2047 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 2048
8d4bd96b
MC
2049#define BNXT_EVENT_RING_TYPE(data2) \
2050 ((data2) & \
2051 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2052
2053#define BNXT_EVENT_RING_TYPE_RX(data2) \
2054 (BNXT_EVENT_RING_TYPE(data2) == \
2055 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2056
c0c050c5
MC
2057static int bnxt_async_event_process(struct bnxt *bp,
2058 struct hwrm_async_event_cmpl *cmpl)
2059{
2060 u16 event_id = le16_to_cpu(cmpl->event_id);
03ab8ca1
MC
2061 u32 data1 = le32_to_cpu(cmpl->event_data1);
2062 u32 data2 = le32_to_cpu(cmpl->event_data2);
c0c050c5
MC
2063
2064 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2065 switch (event_id) {
87c374de 2066 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
8cbde117
MC
2067 struct bnxt_link_info *link_info = &bp->link_info;
2068
2069 if (BNXT_VF(bp))
2070 goto async_event_process_exit;
a8168b6c
MC
2071
2072 /* print unsupported speed warning in forced speed mode only */
2073 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2074 (data1 & 0x20000)) {
8cbde117
MC
2075 u16 fw_speed = link_info->force_link_speed;
2076 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2077
a8168b6c
MC
2078 if (speed != SPEED_UNKNOWN)
2079 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2080 speed);
8cbde117 2081 }
286ef9d6 2082 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
8cbde117 2083 }
df561f66 2084 fallthrough;
b1613e78
MC
2085 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2086 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2087 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
df561f66 2088 fallthrough;
87c374de 2089 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
c0c050c5 2090 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
19241368 2091 break;
87c374de 2092 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
19241368 2093 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
c0c050c5 2094 break;
87c374de 2095 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
4bb13abf
MC
2096 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2097
2098 if (BNXT_VF(bp))
2099 break;
2100
2101 if (bp->pf.port_id != port_id)
2102 break;
2103
4bb13abf
MC
2104 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2105 break;
2106 }
87c374de 2107 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
fc0f1929
MC
2108 if (BNXT_PF(bp))
2109 goto async_event_process_exit;
2110 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2111 break;
5863b10a
MC
2112 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2113 char *fatal_str = "non-fatal";
2114
8280b38e
VV
2115 if (!bp->fw_health)
2116 goto async_event_process_exit;
2117
2151fe08
MC
2118 bp->fw_reset_timestamp = jiffies;
2119 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2120 if (!bp->fw_reset_min_dsecs)
2121 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2122 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2123 if (!bp->fw_reset_max_dsecs)
2124 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
acfb50e4 2125 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
5863b10a 2126 fatal_str = "fatal";
acfb50e4 2127 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
5863b10a 2128 }
871127e6
MC
2129 netif_warn(bp, hw, bp->dev,
2130 "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2131 fatal_str, data1, data2,
2132 bp->fw_reset_min_dsecs * 100,
2133 bp->fw_reset_max_dsecs * 100);
2151fe08
MC
2134 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2135 break;
5863b10a 2136 }
7e914027
MC
2137 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2138 struct bnxt_fw_health *fw_health = bp->fw_health;
7e914027
MC
2139
2140 if (!fw_health)
2141 goto async_event_process_exit;
2142
2143 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2144 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
f4d95c3c
MC
2145 if (!fw_health->enabled) {
2146 netif_info(bp, drv, bp->dev,
2147 "Error recovery info: error recovery[0]\n");
7e914027 2148 break;
f4d95c3c 2149 }
7e914027
MC
2150 fw_health->tmr_multiplier =
2151 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2152 bp->current_interval * 10);
2153 fw_health->tmr_counter = fw_health->tmr_multiplier;
2154 fw_health->last_fw_heartbeat =
2155 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2156 fw_health->last_fw_reset_cnt =
2157 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
f4d95c3c
MC
2158 netif_info(bp, drv, bp->dev,
2159 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2160 fw_health->master, fw_health->last_fw_reset_cnt,
2161 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
7e914027
MC
2162 goto async_event_process_exit;
2163 }
a44daa8f 2164 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
871127e6
MC
2165 netif_notice(bp, hw, bp->dev,
2166 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2167 data1, data2);
a44daa8f 2168 goto async_event_process_exit;
8d4bd96b 2169 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
8d4bd96b
MC
2170 struct bnxt_rx_ring_info *rxr;
2171 u16 grp_idx;
2172
2173 if (bp->flags & BNXT_FLAG_CHIP_P5)
2174 goto async_event_process_exit;
2175
2176 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2177 BNXT_EVENT_RING_TYPE(data2), data1);
2178 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2179 goto async_event_process_exit;
2180
2181 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2182 if (grp_idx == INVALID_HW_RING_ID) {
2183 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2184 data1);
2185 goto async_event_process_exit;
2186 }
2187 rxr = bp->bnapi[grp_idx]->rx_ring;
2188 bnxt_sched_reset(bp, rxr);
2189 goto async_event_process_exit;
2190 }
df97b34d
MC
2191 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2192 struct bnxt_fw_health *fw_health = bp->fw_health;
2193
2194 netif_notice(bp, hw, bp->dev,
2195 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2196 data1, data2);
2197 if (fw_health) {
2198 fw_health->echo_req_data1 = data1;
2199 fw_health->echo_req_data2 = data2;
2200 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2201 break;
2202 }
2203 goto async_event_process_exit;
2204 }
c0c050c5 2205 default:
19241368 2206 goto async_event_process_exit;
c0c050c5 2207 }
c213eae8 2208 bnxt_queue_sp_work(bp);
19241368 2209async_event_process_exit:
a588e458 2210 bnxt_ulp_async_events(bp, cmpl);
c0c050c5
MC
2211 return 0;
2212}
2213
2214static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2215{
2216 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2217 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2218 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2219 (struct hwrm_fwd_req_cmpl *)txcmp;
2220
2221 switch (cmpl_type) {
2222 case CMPL_BASE_TYPE_HWRM_DONE:
2223 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2224 if (seq_id == bp->hwrm_intr_seq_id)
fc718bb2 2225 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
c0c050c5
MC
2226 else
2227 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2228 break;
2229
2230 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2231 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2232
2233 if ((vf_id < bp->pf.first_vf_id) ||
2234 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2235 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2236 vf_id);
2237 return -EINVAL;
2238 }
2239
2240 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2241 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
c213eae8 2242 bnxt_queue_sp_work(bp);
c0c050c5
MC
2243 break;
2244
2245 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2246 bnxt_async_event_process(bp,
2247 (struct hwrm_async_event_cmpl *)txcmp);
cc9fd180 2248 break;
c0c050c5
MC
2249
2250 default:
2251 break;
2252 }
2253
2254 return 0;
2255}
2256
2257static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2258{
2259 struct bnxt_napi *bnapi = dev_instance;
2260 struct bnxt *bp = bnapi->bp;
2261 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2262 u32 cons = RING_CMP(cpr->cp_raw_cons);
2263
6a8788f2 2264 cpr->event_ctr++;
c0c050c5
MC
2265 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2266 napi_schedule(&bnapi->napi);
2267 return IRQ_HANDLED;
2268}
2269
2270static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2271{
2272 u32 raw_cons = cpr->cp_raw_cons;
2273 u16 cons = RING_CMP(raw_cons);
2274 struct tx_cmp *txcmp;
2275
2276 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2277
2278 return TX_CMP_VALID(txcmp, raw_cons);
2279}
2280
c0c050c5
MC
2281static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2282{
2283 struct bnxt_napi *bnapi = dev_instance;
2284 struct bnxt *bp = bnapi->bp;
2285 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2286 u32 cons = RING_CMP(cpr->cp_raw_cons);
2287 u32 int_status;
2288
2289 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2290
2291 if (!bnxt_has_work(bp, cpr)) {
11809490 2292 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
c0c050c5
MC
2293 /* return if erroneous interrupt */
2294 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2295 return IRQ_NONE;
2296 }
2297
2298 /* disable ring IRQ */
697197e5 2299 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
c0c050c5
MC
2300
2301 /* Return here if interrupt is shared and is disabled. */
2302 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2303 return IRQ_HANDLED;
2304
2305 napi_schedule(&bnapi->napi);
2306 return IRQ_HANDLED;
2307}
2308
3675b92f
MC
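/* Process completions on one completion ring, up to the NAPI budget.
 * TX completions and RX/AGG doorbell updates are only recorded here
 * (bnapi->tx_pkts, bnapi->events) and finished in __bnxt_poll_work_done().
 */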
2309static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2310 int budget)
c0c050c5 2311{
e44758b7 2312 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5
MC
2313 u32 raw_cons = cpr->cp_raw_cons;
2314 u32 cons;
2315 int tx_pkts = 0;
2316 int rx_pkts = 0;
4e5dbbda 2317 u8 event = 0;
c0c050c5
MC
2318 struct tx_cmp *txcmp;
2319
0fcec985 2320 cpr->has_more_work = 0;
340ac85e 2321 cpr->had_work_done = 1;
c0c050c5
MC
2322 while (1) {
2323 int rc;
2324
2325 cons = RING_CMP(raw_cons);
2326 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2327
2328 if (!TX_CMP_VALID(txcmp, raw_cons))
2329 break;
2330
67a95e20
MC
2331 /* The valid test of the entry must be done first before
2332 * reading any further.
2333 */
b67daab0 2334 dma_rmb();
c0c050c5
MC
2335 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2336 tx_pkts++;
2337 /* return full budget so NAPI will complete. */
73f21c65 2338 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
c0c050c5 2339 rx_pkts = budget;
73f21c65 2340 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985
MC
2341 if (budget)
2342 cpr->has_more_work = 1;
73f21c65
MC
2343 break;
2344 }
c0c050c5 2345 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2270bc5d 2346 if (likely(budget))
e44758b7 2347 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2270bc5d 2348 else
e44758b7 2349 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2270bc5d 2350 &event);
c0c050c5
MC
2351 if (likely(rc >= 0))
2352 rx_pkts += rc;
903649e7
MC
2353 /* Increment rx_pkts when rc is -ENOMEM to count towards
2354 * the NAPI budget. Otherwise, we may potentially loop
2355 * here forever if we consistently cannot allocate
2356 * buffers.
2357 */
2edbdb31 2358 else if (rc == -ENOMEM && budget)
903649e7 2359 rx_pkts++;
c0c050c5
MC
2360 else if (rc == -EBUSY) /* partial completion */
2361 break;
c0c050c5
MC
2362 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2363 CMPL_BASE_TYPE_HWRM_DONE) ||
2364 (TX_CMP_TYPE(txcmp) ==
2365 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2366 (TX_CMP_TYPE(txcmp) ==
2367 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2368 bnxt_hwrm_handler(bp, txcmp);
2369 }
2370 raw_cons = NEXT_RAW_CMP(raw_cons);
2371
0fcec985
MC
2372 if (rx_pkts && rx_pkts == budget) {
2373 cpr->has_more_work = 1;
c0c050c5 2374 break;
0fcec985 2375 }
c0c050c5
MC
2376 }
2377
f18c2b77
AG
2378 if (event & BNXT_REDIRECT_EVENT)
2379 xdp_do_flush_map();
2380
38413406
MC
2381 if (event & BNXT_TX_EVENT) {
2382 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
38413406
MC
2383 u16 prod = txr->tx_prod;
2384
2385 /* Sync BD data before updating doorbell */
2386 wmb();
2387
697197e5 2388 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
38413406
MC
2389 }
2390
c0c050c5 2391 cpr->cp_raw_cons = raw_cons;
3675b92f
MC
2392 bnapi->tx_pkts += tx_pkts;
2393 bnapi->events |= event;
2394 return rx_pkts;
2395}
c0c050c5 2396
3675b92f
MC
2397static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2398{
2399 if (bnapi->tx_pkts) {
2400 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2401 bnapi->tx_pkts = 0;
2402 }
c0c050c5 2403
8fbf58e1 2404 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
b6ab4b01 2405 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 2406
3675b92f 2407 if (bnapi->events & BNXT_AGG_EVENT)
697197e5 2408 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 2409 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
c0c050c5 2410 }
3675b92f
MC
2411 bnapi->events = 0;
2412}
2413
2414static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2415 int budget)
2416{
2417 struct bnxt_napi *bnapi = cpr->bnapi;
2418 int rx_pkts;
2419
2420 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2421
2422 /* ACK completion ring before freeing tx ring and producing new
2423 * buffers in rx/agg rings to prevent overflowing the completion
2424 * ring.
2425 */
2426 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2427
2428 __bnxt_poll_work_done(bp, bnapi);
c0c050c5
MC
2429 return rx_pkts;
2430}
2431
10bbdaf5
PS
2432static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2433{
2434 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2435 struct bnxt *bp = bnapi->bp;
2436 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2437 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2438 struct tx_cmp *txcmp;
2439 struct rx_cmp_ext *rxcmp1;
2440 u32 cp_cons, tmp_raw_cons;
2441 u32 raw_cons = cpr->cp_raw_cons;
2442 u32 rx_pkts = 0;
4e5dbbda 2443 u8 event = 0;
10bbdaf5
PS
2444
2445 while (1) {
2446 int rc;
2447
2448 cp_cons = RING_CMP(raw_cons);
2449 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2450
2451 if (!TX_CMP_VALID(txcmp, raw_cons))
2452 break;
2453
2454 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2455 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2456 cp_cons = RING_CMP(tmp_raw_cons);
2457 rxcmp1 = (struct rx_cmp_ext *)
2458 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2459
2460 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2461 break;
2462
2463 /* force an error to recycle the buffer */
2464 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2465 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2466
e44758b7 2467 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2edbdb31 2468 if (likely(rc == -EIO) && budget)
10bbdaf5
PS
2469 rx_pkts++;
2470 else if (rc == -EBUSY) /* partial completion */
2471 break;
2472 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2473 CMPL_BASE_TYPE_HWRM_DONE)) {
2474 bnxt_hwrm_handler(bp, txcmp);
2475 } else {
2476 netdev_err(bp->dev,
2477 "Invalid completion received on special ring\n");
2478 }
2479 raw_cons = NEXT_RAW_CMP(raw_cons);
2480
2481 if (rx_pkts == budget)
2482 break;
2483 }
2484
2485 cpr->cp_raw_cons = raw_cons;
697197e5
MC
2486 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2487 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10bbdaf5 2488
434c975a 2489 if (event & BNXT_AGG_EVENT)
697197e5 2490 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10bbdaf5
PS
2491
2492 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
6ad20165 2493 napi_complete_done(napi, rx_pkts);
697197e5 2494 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
10bbdaf5
PS
2495 }
2496 return rx_pkts;
2497}
2498
c0c050c5
MC
2499static int bnxt_poll(struct napi_struct *napi, int budget)
2500{
2501 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2502 struct bnxt *bp = bnapi->bp;
2503 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2504 int work_done = 0;
2505
0da65f49
MC
2506 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2507 napi_complete(napi);
2508 return 0;
2509 }
c0c050c5 2510 while (1) {
e44758b7 2511 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
c0c050c5 2512
73f21c65
MC
2513 if (work_done >= budget) {
2514 if (!budget)
697197e5 2515 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5 2516 break;
73f21c65 2517 }
c0c050c5
MC
2518
2519 if (!bnxt_has_work(bp, cpr)) {
e7b95691 2520 if (napi_complete_done(napi, work_done))
697197e5 2521 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5
MC
2522 break;
2523 }
2524 }
6a8788f2 2525 if (bp->flags & BNXT_FLAG_DIM) {
f06d0ca4 2526 struct dim_sample dim_sample = {};
6a8788f2 2527
8960b389
TG
2528 dim_update_sample(cpr->event_ctr,
2529 cpr->rx_packets,
2530 cpr->rx_bytes,
2531 &dim_sample);
6a8788f2
AG
2532 net_dim(&cpr->dim, dim_sample);
2533 }
c0c050c5
MC
2534 return work_done;
2535}
2536
0fcec985
MC
2537static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2538{
2539 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2540 int i, work_done = 0;
2541
2542 for (i = 0; i < 2; i++) {
2543 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2544
2545 if (cpr2) {
2546 work_done += __bnxt_poll_work(bp, cpr2,
2547 budget - work_done);
2548 cpr->has_more_work |= cpr2->has_more_work;
2549 }
2550 }
2551 return work_done;
2552}
2553
2554static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
340ac85e 2555 u64 dbr_type)
0fcec985
MC
2556{
2557 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2558 int i;
2559
2560 for (i = 0; i < 2; i++) {
2561 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2562 struct bnxt_db_info *db;
2563
340ac85e 2564 if (cpr2 && cpr2->had_work_done) {
0fcec985
MC
2565 db = &cpr2->cp_db;
2566 writeq(db->db_key64 | dbr_type |
2567 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2568 cpr2->had_work_done = 0;
2569 }
2570 }
2571 __bnxt_poll_work_done(bp, bnapi);
2572}
2573
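/* NAPI poll handler for P5 chips: drain the notification queue (NQ) and
 * dispatch each CQ notification to the matching completion ring in
 * cpr->cp_ring_arr[], then write the CQ/NQ doorbells and re-arm once all
 * work is done.
 */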
2574static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2575{
2576 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2577 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2578 u32 raw_cons = cpr->cp_raw_cons;
2579 struct bnxt *bp = bnapi->bp;
2580 struct nqe_cn *nqcmp;
2581 int work_done = 0;
2582 u32 cons;
2583
0da65f49
MC
2584 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2585 napi_complete(napi);
2586 return 0;
2587 }
0fcec985
MC
2588 if (cpr->has_more_work) {
2589 cpr->has_more_work = 0;
2590 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
0fcec985
MC
2591 }
2592 while (1) {
2593 cons = RING_CMP(raw_cons);
2594 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2595
2596 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
54a9062f
MC
2597 if (cpr->has_more_work)
2598 break;
2599
340ac85e 2600 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
0fcec985
MC
2601 cpr->cp_raw_cons = raw_cons;
2602 if (napi_complete_done(napi, work_done))
2603 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2604 cpr->cp_raw_cons);
2605 return work_done;
2606 }
2607
2608 /* The valid test of the entry must be done first before
2609 * reading any further.
2610 */
2611 dma_rmb();
2612
2613 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2614 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2615 struct bnxt_cp_ring_info *cpr2;
2616
2617 cpr2 = cpr->cp_ring_arr[idx];
2618 work_done += __bnxt_poll_work(bp, cpr2,
2619 budget - work_done);
54a9062f 2620 cpr->has_more_work |= cpr2->has_more_work;
0fcec985
MC
2621 } else {
2622 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2623 }
2624 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985 2625 }
340ac85e 2626 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
389a877a
MC
2627 if (raw_cons != cpr->cp_raw_cons) {
2628 cpr->cp_raw_cons = raw_cons;
2629 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2630 }
0fcec985
MC
2631 return work_done;
2632}
2633
c0c050c5
MC
2634static void bnxt_free_tx_skbs(struct bnxt *bp)
2635{
2636 int i, max_idx;
2637 struct pci_dev *pdev = bp->pdev;
2638
b6ab4b01 2639 if (!bp->tx_ring)
c0c050c5
MC
2640 return;
2641
2642 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2643 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2644 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2645 int j;
2646
c0c050c5
MC
2647 for (j = 0; j < max_idx;) {
2648 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
f18c2b77 2649 struct sk_buff *skb;
c0c050c5
MC
2650 int k, last;
2651
f18c2b77
AG
2652 if (i < bp->tx_nr_rings_xdp &&
2653 tx_buf->action == XDP_REDIRECT) {
2654 dma_unmap_single(&pdev->dev,
2655 dma_unmap_addr(tx_buf, mapping),
2656 dma_unmap_len(tx_buf, len),
2657 PCI_DMA_TODEVICE);
2658 xdp_return_frame(tx_buf->xdpf);
2659 tx_buf->action = 0;
2660 tx_buf->xdpf = NULL;
2661 j++;
2662 continue;
2663 }
2664
2665 skb = tx_buf->skb;
c0c050c5
MC
2666 if (!skb) {
2667 j++;
2668 continue;
2669 }
2670
2671 tx_buf->skb = NULL;
2672
2673 if (tx_buf->is_push) {
2674 dev_kfree_skb(skb);
2675 j += 2;
2676 continue;
2677 }
2678
2679 dma_unmap_single(&pdev->dev,
2680 dma_unmap_addr(tx_buf, mapping),
2681 skb_headlen(skb),
2682 PCI_DMA_TODEVICE);
2683
2684 last = tx_buf->nr_frags;
2685 j += 2;
d612a579
MC
2686 for (k = 0; k < last; k++, j++) {
2687 int ring_idx = j & bp->tx_ring_mask;
c0c050c5
MC
2688 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2689
d612a579 2690 tx_buf = &txr->tx_buf_ring[ring_idx];
c0c050c5
MC
2691 dma_unmap_page(
2692 &pdev->dev,
2693 dma_unmap_addr(tx_buf, mapping),
2694 skb_frag_size(frag), PCI_DMA_TODEVICE);
2695 }
2696 dev_kfree_skb(skb);
2697 }
2698 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2699 }
2700}
2701
975bc99a 2702static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
c0c050c5 2703{
975bc99a 2704 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
c0c050c5 2705 struct pci_dev *pdev = bp->pdev;
975bc99a
MC
2706 struct bnxt_tpa_idx_map *map;
2707 int i, max_idx, max_agg_idx;
c0c050c5
MC
2708
2709 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2710 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
975bc99a
MC
2711 if (!rxr->rx_tpa)
2712 goto skip_rx_tpa_free;
c0c050c5 2713
975bc99a
MC
2714 for (i = 0; i < bp->max_tpa; i++) {
2715 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2716 u8 *data = tpa_info->data;
c0c050c5 2717
975bc99a
MC
2718 if (!data)
2719 continue;
c0c050c5 2720
975bc99a
MC
2721 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2722 bp->rx_buf_use_size, bp->rx_dir,
2723 DMA_ATTR_WEAK_ORDERING);
c0c050c5 2724
975bc99a 2725 tpa_info->data = NULL;
c0c050c5 2726
975bc99a
MC
2727 kfree(data);
2728 }
c0c050c5 2729
975bc99a
MC
2730skip_rx_tpa_free:
2731 for (i = 0; i < max_idx; i++) {
2732 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2733 dma_addr_t mapping = rx_buf->mapping;
2734 void *data = rx_buf->data;
c0c050c5 2735
975bc99a
MC
2736 if (!data)
2737 continue;
c0c050c5 2738
975bc99a
MC
2739 rx_buf->data = NULL;
2740 if (BNXT_RX_PAGE_MODE(bp)) {
2741 mapping -= bp->rx_dma_offset;
2742 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2743 bp->rx_dir,
2744 DMA_ATTR_WEAK_ORDERING);
2745 page_pool_recycle_direct(rxr->page_pool, data);
2746 } else {
2747 dma_unmap_single_attrs(&pdev->dev, mapping,
2748 bp->rx_buf_use_size, bp->rx_dir,
2749 DMA_ATTR_WEAK_ORDERING);
2750 kfree(data);
c0c050c5 2751 }
975bc99a
MC
2752 }
2753 for (i = 0; i < max_agg_idx; i++) {
2754 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2755 struct page *page = rx_agg_buf->page;
c0c050c5 2756
975bc99a
MC
2757 if (!page)
2758 continue;
c0c050c5 2759
975bc99a
MC
2760 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2761 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
2762 DMA_ATTR_WEAK_ORDERING);
c0c050c5 2763
975bc99a
MC
2764 rx_agg_buf->page = NULL;
2765 __clear_bit(i, rxr->rx_agg_bmap);
c0c050c5 2766
975bc99a
MC
2767 __free_page(page);
2768 }
2769 if (rxr->rx_page) {
2770 __free_page(rxr->rx_page);
2771 rxr->rx_page = NULL;
c0c050c5 2772 }
975bc99a
MC
2773 map = rxr->rx_tpa_idx_map;
2774 if (map)
2775 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2776}
2777
2778static void bnxt_free_rx_skbs(struct bnxt *bp)
2779{
2780 int i;
2781
2782 if (!bp->rx_ring)
2783 return;
2784
2785 for (i = 0; i < bp->rx_nr_rings; i++)
2786 bnxt_free_one_rx_ring_skbs(bp, i);
c0c050c5
MC
2787}
2788
2789static void bnxt_free_skbs(struct bnxt *bp)
2790{
2791 bnxt_free_tx_skbs(bp);
2792 bnxt_free_rx_skbs(bp);
2793}
2794
41435c39
MC
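/* Initialize a block of firmware context memory with the value the
 * firmware requested: either fill the whole block, or write init_val at a
 * fixed offset within each fixed-size entry.
 */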
2795static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2796{
2797 u8 init_val = mem_init->init_val;
2798 u16 offset = mem_init->offset;
2799 u8 *p2 = p;
2800 int i;
2801
2802 if (!init_val)
2803 return;
2804 if (offset == BNXT_MEM_INVALID_OFFSET) {
2805 memset(p, init_val, len);
2806 return;
2807 }
2808 for (i = 0; i < len; i += mem_init->size)
2809 *(p2 + i + offset) = init_val;
2810}
2811
6fe19886 2812static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5
MC
2813{
2814 struct pci_dev *pdev = bp->pdev;
2815 int i;
2816
6fe19886
MC
2817 for (i = 0; i < rmem->nr_pages; i++) {
2818 if (!rmem->pg_arr[i])
c0c050c5
MC
2819 continue;
2820
6fe19886
MC
2821 dma_free_coherent(&pdev->dev, rmem->page_size,
2822 rmem->pg_arr[i], rmem->dma_arr[i]);
c0c050c5 2823
6fe19886 2824 rmem->pg_arr[i] = NULL;
c0c050c5 2825 }
6fe19886 2826 if (rmem->pg_tbl) {
4f49b2b8
MC
2827 size_t pg_tbl_size = rmem->nr_pages * 8;
2828
2829 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2830 pg_tbl_size = rmem->page_size;
2831 dma_free_coherent(&pdev->dev, pg_tbl_size,
6fe19886
MC
2832 rmem->pg_tbl, rmem->pg_tbl_map);
2833 rmem->pg_tbl = NULL;
c0c050c5 2834 }
6fe19886
MC
2835 if (rmem->vmem_size && *rmem->vmem) {
2836 vfree(*rmem->vmem);
2837 *rmem->vmem = NULL;
c0c050c5
MC
2838 }
2839}
2840
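/* Allocate the DMA-coherent pages backing one hardware ring described by
 * rmem.  Rings spanning more than one page (or with depth > 0) also get a
 * page table; when the PTE flags are set, the entries carry
 * VALID/NEXT_TO_LAST/LAST bits.
 */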
6fe19886 2841static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5 2842{
c0c050c5 2843 struct pci_dev *pdev = bp->pdev;
66cca20a 2844 u64 valid_bit = 0;
6fe19886 2845 int i;
c0c050c5 2846
66cca20a
MC
2847 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2848 valid_bit = PTU_PTE_VALID;
4f49b2b8
MC
2849 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2850 size_t pg_tbl_size = rmem->nr_pages * 8;
2851
2852 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2853 pg_tbl_size = rmem->page_size;
2854 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
6fe19886 2855 &rmem->pg_tbl_map,
c0c050c5 2856 GFP_KERNEL);
6fe19886 2857 if (!rmem->pg_tbl)
c0c050c5
MC
2858 return -ENOMEM;
2859 }
2860
6fe19886 2861 for (i = 0; i < rmem->nr_pages; i++) {
66cca20a
MC
2862 u64 extra_bits = valid_bit;
2863
6fe19886
MC
2864 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2865 rmem->page_size,
2866 &rmem->dma_arr[i],
c0c050c5 2867 GFP_KERNEL);
6fe19886 2868 if (!rmem->pg_arr[i])
c0c050c5
MC
2869 return -ENOMEM;
2870
41435c39
MC
2871 if (rmem->mem_init)
2872 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2873 rmem->page_size);
4f49b2b8 2874 if (rmem->nr_pages > 1 || rmem->depth > 0) {
66cca20a
MC
2875 if (i == rmem->nr_pages - 2 &&
2876 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2877 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2878 else if (i == rmem->nr_pages - 1 &&
2879 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2880 extra_bits |= PTU_PTE_LAST;
2881 rmem->pg_tbl[i] =
2882 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2883 }
c0c050c5
MC
2884 }
2885
6fe19886
MC
2886 if (rmem->vmem_size) {
2887 *rmem->vmem = vzalloc(rmem->vmem_size);
2888 if (!(*rmem->vmem))
c0c050c5
MC
2889 return -ENOMEM;
2890 }
2891 return 0;
2892}
2893
4a228a3a
MC
2894static void bnxt_free_tpa_info(struct bnxt *bp)
2895{
2896 int i;
2897
2898 for (i = 0; i < bp->rx_nr_rings; i++) {
2899 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2900
ec4d8e7c
MC
2901 kfree(rxr->rx_tpa_idx_map);
2902 rxr->rx_tpa_idx_map = NULL;
79632e9b
MC
2903 if (rxr->rx_tpa) {
2904 kfree(rxr->rx_tpa[0].agg_arr);
2905 rxr->rx_tpa[0].agg_arr = NULL;
2906 }
4a228a3a
MC
2907 kfree(rxr->rx_tpa);
2908 rxr->rx_tpa = NULL;
2909 }
2910}
2911
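/* Allocate per-RX-ring TPA session state.  On P5 chips the session count
 * is derived from the firmware-reported max_tpa_v2 and one aggregation
 * completion array per ring is carved into per-session slices; older
 * chips use a fixed MAX_TPA.
 */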
2912static int bnxt_alloc_tpa_info(struct bnxt *bp)
2913{
79632e9b
MC
2914 int i, j, total_aggs = 0;
2915
2916 bp->max_tpa = MAX_TPA;
2917 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2918 if (!bp->max_tpa_v2)
2919 return 0;
2920 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2921 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2922 }
4a228a3a
MC
2923
2924 for (i = 0; i < bp->rx_nr_rings; i++) {
2925 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
79632e9b 2926 struct rx_agg_cmp *agg;
4a228a3a 2927
79632e9b 2928 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
4a228a3a
MC
2929 GFP_KERNEL);
2930 if (!rxr->rx_tpa)
2931 return -ENOMEM;
79632e9b
MC
2932
2933 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2934 continue;
2935 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2936 rxr->rx_tpa[0].agg_arr = agg;
2937 if (!agg)
2938 return -ENOMEM;
2939 for (j = 1; j < bp->max_tpa; j++)
2940 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
ec4d8e7c
MC
2941 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2942 GFP_KERNEL);
2943 if (!rxr->rx_tpa_idx_map)
2944 return -ENOMEM;
4a228a3a
MC
2945 }
2946 return 0;
2947}
2948
c0c050c5
MC
2949static void bnxt_free_rx_rings(struct bnxt *bp)
2950{
2951 int i;
2952
b6ab4b01 2953 if (!bp->rx_ring)
c0c050c5
MC
2954 return;
2955
4a228a3a 2956 bnxt_free_tpa_info(bp);
c0c050c5 2957 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2958 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2959 struct bnxt_ring_struct *ring;
2960
c6d30e83
MC
2961 if (rxr->xdp_prog)
2962 bpf_prog_put(rxr->xdp_prog);
2963
96a8604f
JDB
2964 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2965 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2966
12479f62 2967 page_pool_destroy(rxr->page_pool);
322b87ca
AG
2968 rxr->page_pool = NULL;
2969
c0c050c5
MC
2970 kfree(rxr->rx_agg_bmap);
2971 rxr->rx_agg_bmap = NULL;
2972
2973 ring = &rxr->rx_ring_struct;
6fe19886 2974 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2975
2976 ring = &rxr->rx_agg_ring_struct;
6fe19886 2977 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2978 }
2979}
2980
322b87ca
AG
2981static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2982 struct bnxt_rx_ring_info *rxr)
2983{
2984 struct page_pool_params pp = { 0 };
2985
2986 pp.pool_size = bp->rx_ring_size;
2987 pp.nid = dev_to_node(&bp->pdev->dev);
2988 pp.dev = &bp->pdev->dev;
2989 pp.dma_dir = DMA_BIDIRECTIONAL;
2990
2991 rxr->page_pool = page_pool_create(&pp);
2992 if (IS_ERR(rxr->page_pool)) {
2993 int err = PTR_ERR(rxr->page_pool);
2994
2995 rxr->page_pool = NULL;
2996 return err;
2997 }
2998 return 0;
2999}
3000
c0c050c5
MC
3001static int bnxt_alloc_rx_rings(struct bnxt *bp)
3002{
4a228a3a 3003 int i, rc = 0, agg_rings = 0;
c0c050c5 3004
b6ab4b01
MC
3005 if (!bp->rx_ring)
3006 return -ENOMEM;
3007
c0c050c5
MC
3008 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3009 agg_rings = 1;
3010
c0c050c5 3011 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3012 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
3013 struct bnxt_ring_struct *ring;
3014
c0c050c5
MC
3015 ring = &rxr->rx_ring_struct;
3016
322b87ca
AG
3017 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3018 if (rc)
3019 return rc;
3020
b02e5a0e 3021 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
12479f62 3022 if (rc < 0)
96a8604f
JDB
3023 return rc;
3024
f18c2b77 3025 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
322b87ca
AG
3026 MEM_TYPE_PAGE_POOL,
3027 rxr->page_pool);
f18c2b77
AG
3028 if (rc) {
3029 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3030 return rc;
3031 }
3032
6fe19886 3033 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3034 if (rc)
3035 return rc;
3036
2c61d211 3037 ring->grp_idx = i;
c0c050c5
MC
3038 if (agg_rings) {
3039 u16 mem_size;
3040
3041 ring = &rxr->rx_agg_ring_struct;
6fe19886 3042 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3043 if (rc)
3044 return rc;
3045
9899bb59 3046 ring->grp_idx = i;
c0c050c5
MC
3047 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3048 mem_size = rxr->rx_agg_bmap_size / 8;
3049 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3050 if (!rxr->rx_agg_bmap)
3051 return -ENOMEM;
c0c050c5
MC
3052 }
3053 }
4a228a3a
MC
3054 if (bp->flags & BNXT_FLAG_TPA)
3055 rc = bnxt_alloc_tpa_info(bp);
3056 return rc;
c0c050c5
MC
3057}
3058
3059static void bnxt_free_tx_rings(struct bnxt *bp)
3060{
3061 int i;
3062 struct pci_dev *pdev = bp->pdev;
3063
b6ab4b01 3064 if (!bp->tx_ring)
c0c050c5
MC
3065 return;
3066
3067 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3068 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3069 struct bnxt_ring_struct *ring;
3070
c0c050c5
MC
3071 if (txr->tx_push) {
3072 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3073 txr->tx_push, txr->tx_push_mapping);
3074 txr->tx_push = NULL;
3075 }
3076
3077 ring = &txr->tx_ring_struct;
3078
6fe19886 3079 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3080 }
3081}
3082
3083static int bnxt_alloc_tx_rings(struct bnxt *bp)
3084{
3085 int i, j, rc;
3086 struct pci_dev *pdev = bp->pdev;
3087
3088 bp->tx_push_size = 0;
3089 if (bp->tx_push_thresh) {
3090 int push_size;
3091
3092 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3093 bp->tx_push_thresh);
3094
4419dbe6 3095 if (push_size > 256) {
c0c050c5
MC
3096 push_size = 0;
3097 bp->tx_push_thresh = 0;
3098 }
3099
3100 bp->tx_push_size = push_size;
3101 }
3102
3103 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3104 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5 3105 struct bnxt_ring_struct *ring;
2e8ef77e 3106 u8 qidx;
c0c050c5 3107
c0c050c5
MC
3108 ring = &txr->tx_ring_struct;
3109
6fe19886 3110 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3111 if (rc)
3112 return rc;
3113
9899bb59 3114 ring->grp_idx = txr->bnapi->index;
c0c050c5 3115 if (bp->tx_push_size) {
c0c050c5
MC
3116 dma_addr_t mapping;
3117
 3118			/* One pre-allocated DMA buffer to back up
 3119			 * the TX push operation
3120 */
3121 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3122 bp->tx_push_size,
3123 &txr->tx_push_mapping,
3124 GFP_KERNEL);
3125
3126 if (!txr->tx_push)
3127 return -ENOMEM;
3128
c0c050c5
MC
3129 mapping = txr->tx_push_mapping +
3130 sizeof(struct tx_push_bd);
4419dbe6 3131 txr->data_mapping = cpu_to_le64(mapping);
c0c050c5 3132 }
2e8ef77e
MC
3133 qidx = bp->tc_to_qidx[j];
3134 ring->queue_id = bp->q_info[qidx].queue_id;
5f449249
MC
3135 if (i < bp->tx_nr_rings_xdp)
3136 continue;
c0c050c5
MC
3137 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3138 j++;
3139 }
3140 return 0;
3141}
3142
3143static void bnxt_free_cp_rings(struct bnxt *bp)
3144{
3145 int i;
3146
3147 if (!bp->bnapi)
3148 return;
3149
3150 for (i = 0; i < bp->cp_nr_rings; i++) {
3151 struct bnxt_napi *bnapi = bp->bnapi[i];
3152 struct bnxt_cp_ring_info *cpr;
3153 struct bnxt_ring_struct *ring;
50e3ab78 3154 int j;
c0c050c5
MC
3155
3156 if (!bnapi)
3157 continue;
3158
3159 cpr = &bnapi->cp_ring;
3160 ring = &cpr->cp_ring_struct;
3161
6fe19886 3162 bnxt_free_ring(bp, &ring->ring_mem);
50e3ab78
MC
3163
3164 for (j = 0; j < 2; j++) {
3165 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3166
3167 if (cpr2) {
3168 ring = &cpr2->cp_ring_struct;
3169 bnxt_free_ring(bp, &ring->ring_mem);
3170 kfree(cpr2);
3171 cpr->cp_ring_arr[j] = NULL;
3172 }
3173 }
c0c050c5
MC
3174 }
3175}
3176
50e3ab78
MC
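/* On P5 chips each NAPI instance owns a notification queue plus up to two
 * child completion rings (RX and TX); allocate one such child ring.
 */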
3177static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3178{
3179 struct bnxt_ring_mem_info *rmem;
3180 struct bnxt_ring_struct *ring;
3181 struct bnxt_cp_ring_info *cpr;
3182 int rc;
3183
3184 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3185 if (!cpr)
3186 return NULL;
3187
3188 ring = &cpr->cp_ring_struct;
3189 rmem = &ring->ring_mem;
3190 rmem->nr_pages = bp->cp_nr_pages;
3191 rmem->page_size = HW_CMPD_RING_SIZE;
3192 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3193 rmem->dma_arr = cpr->cp_desc_mapping;
3194 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3195 rc = bnxt_alloc_ring(bp, rmem);
3196 if (rc) {
3197 bnxt_free_ring(bp, rmem);
3198 kfree(cpr);
3199 cpr = NULL;
3200 }
3201 return cpr;
3202}
3203
c0c050c5
MC
3204static int bnxt_alloc_cp_rings(struct bnxt *bp)
3205{
50e3ab78 3206 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
e5811b8c 3207 int i, rc, ulp_base_vec, ulp_msix;
c0c050c5 3208
e5811b8c
MC
3209 ulp_msix = bnxt_get_ulp_msix_num(bp);
3210 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
c0c050c5
MC
3211 for (i = 0; i < bp->cp_nr_rings; i++) {
3212 struct bnxt_napi *bnapi = bp->bnapi[i];
3213 struct bnxt_cp_ring_info *cpr;
3214 struct bnxt_ring_struct *ring;
3215
3216 if (!bnapi)
3217 continue;
3218
3219 cpr = &bnapi->cp_ring;
50e3ab78 3220 cpr->bnapi = bnapi;
c0c050c5
MC
3221 ring = &cpr->cp_ring_struct;
3222
6fe19886 3223 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3224 if (rc)
3225 return rc;
e5811b8c
MC
3226
3227 if (ulp_msix && i >= ulp_base_vec)
3228 ring->map_idx = i + ulp_msix;
3229 else
3230 ring->map_idx = i;
50e3ab78
MC
3231
3232 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3233 continue;
3234
3235 if (i < bp->rx_nr_rings) {
3236 struct bnxt_cp_ring_info *cpr2 =
3237 bnxt_alloc_cp_sub_ring(bp);
3238
3239 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3240 if (!cpr2)
3241 return -ENOMEM;
3242 cpr2->bnapi = bnapi;
3243 }
3244 if ((sh && i < bp->tx_nr_rings) ||
3245 (!sh && i >= bp->rx_nr_rings)) {
3246 struct bnxt_cp_ring_info *cpr2 =
3247 bnxt_alloc_cp_sub_ring(bp);
3248
3249 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3250 if (!cpr2)
3251 return -ENOMEM;
3252 cpr2->bnapi = bnapi;
3253 }
c0c050c5
MC
3254 }
3255 return 0;
3256}
3257
3258static void bnxt_init_ring_struct(struct bnxt *bp)
3259{
3260 int i;
3261
3262 for (i = 0; i < bp->cp_nr_rings; i++) {
3263 struct bnxt_napi *bnapi = bp->bnapi[i];
6fe19886 3264 struct bnxt_ring_mem_info *rmem;
c0c050c5
MC
3265 struct bnxt_cp_ring_info *cpr;
3266 struct bnxt_rx_ring_info *rxr;
3267 struct bnxt_tx_ring_info *txr;
3268 struct bnxt_ring_struct *ring;
3269
3270 if (!bnapi)
3271 continue;
3272
3273 cpr = &bnapi->cp_ring;
3274 ring = &cpr->cp_ring_struct;
6fe19886
MC
3275 rmem = &ring->ring_mem;
3276 rmem->nr_pages = bp->cp_nr_pages;
3277 rmem->page_size = HW_CMPD_RING_SIZE;
3278 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3279 rmem->dma_arr = cpr->cp_desc_mapping;
3280 rmem->vmem_size = 0;
c0c050c5 3281
b6ab4b01 3282 rxr = bnapi->rx_ring;
3b2b7d9d
MC
3283 if (!rxr)
3284 goto skip_rx;
3285
c0c050c5 3286 ring = &rxr->rx_ring_struct;
6fe19886
MC
3287 rmem = &ring->ring_mem;
3288 rmem->nr_pages = bp->rx_nr_pages;
3289 rmem->page_size = HW_RXBD_RING_SIZE;
3290 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3291 rmem->dma_arr = rxr->rx_desc_mapping;
3292 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3293 rmem->vmem = (void **)&rxr->rx_buf_ring;
c0c050c5
MC
3294
3295 ring = &rxr->rx_agg_ring_struct;
6fe19886
MC
3296 rmem = &ring->ring_mem;
3297 rmem->nr_pages = bp->rx_agg_nr_pages;
3298 rmem->page_size = HW_RXBD_RING_SIZE;
3299 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3300 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3301 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3302 rmem->vmem = (void **)&rxr->rx_agg_ring;
c0c050c5 3303
3b2b7d9d 3304skip_rx:
b6ab4b01 3305 txr = bnapi->tx_ring;
3b2b7d9d
MC
3306 if (!txr)
3307 continue;
3308
c0c050c5 3309 ring = &txr->tx_ring_struct;
6fe19886
MC
3310 rmem = &ring->ring_mem;
3311 rmem->nr_pages = bp->tx_nr_pages;
3312 rmem->page_size = HW_RXBD_RING_SIZE;
3313 rmem->pg_arr = (void **)txr->tx_desc_ring;
3314 rmem->dma_arr = txr->tx_desc_mapping;
3315 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3316 rmem->vmem = (void **)&txr->tx_buf_ring;
c0c050c5
MC
3317 }
3318}
3319
3320static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3321{
3322 int i;
3323 u32 prod;
3324 struct rx_bd **rx_buf_ring;
3325
6fe19886
MC
3326 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3327 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
c0c050c5
MC
3328 int j;
3329 struct rx_bd *rxbd;
3330
3331 rxbd = rx_buf_ring[i];
3332 if (!rxbd)
3333 continue;
3334
3335 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3336 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3337 rxbd->rx_bd_opaque = prod;
3338 }
3339 }
3340}
3341
7737d325 3342static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
c0c050c5 3343{
7737d325 3344 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
c0c050c5 3345 struct net_device *dev = bp->dev;
7737d325 3346 u32 prod;
c0c050c5
MC
3347 int i;
3348
c0c050c5
MC
3349 prod = rxr->rx_prod;
3350 for (i = 0; i < bp->rx_ring_size; i++) {
7737d325 3351 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
c0c050c5
MC
3352 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3353 ring_nr, i, bp->rx_ring_size);
3354 break;
3355 }
3356 prod = NEXT_RX(prod);
3357 }
3358 rxr->rx_prod = prod;
edd0c2cc 3359
c0c050c5
MC
3360 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3361 return 0;
3362
c0c050c5
MC
3363 prod = rxr->rx_agg_prod;
3364 for (i = 0; i < bp->rx_agg_ring_size; i++) {
7737d325 3365 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
c0c050c5
MC
3366 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3367 ring_nr, i, bp->rx_ring_size);
3368 break;
3369 }
3370 prod = NEXT_RX_AGG(prod);
3371 }
3372 rxr->rx_agg_prod = prod;
c0c050c5 3373
7737d325
MC
3374 if (rxr->rx_tpa) {
3375 dma_addr_t mapping;
3376 u8 *data;
c0c050c5 3377
7737d325
MC
3378 for (i = 0; i < bp->max_tpa; i++) {
3379 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3380 if (!data)
3381 return -ENOMEM;
c0c050c5 3382
7737d325
MC
3383 rxr->rx_tpa[i].data = data;
3384 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3385 rxr->rx_tpa[i].mapping = mapping;
c0c050c5
MC
3386 }
3387 }
c0c050c5
MC
3388 return 0;
3389}
3390
7737d325
MC
3391static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3392{
3393 struct bnxt_rx_ring_info *rxr;
3394 struct bnxt_ring_struct *ring;
3395 u32 type;
3396
3397 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3398 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3399
3400 if (NET_IP_ALIGN == 2)
3401 type |= RX_BD_FLAGS_SOP;
3402
3403 rxr = &bp->rx_ring[ring_nr];
3404 ring = &rxr->rx_ring_struct;
3405 bnxt_init_rxbd_pages(ring, type);
3406
3407 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3408 bpf_prog_add(bp->xdp_prog, 1);
3409 rxr->xdp_prog = bp->xdp_prog;
3410 }
3411 ring->fw_ring_id = INVALID_HW_RING_ID;
3412
3413 ring = &rxr->rx_agg_ring_struct;
3414 ring->fw_ring_id = INVALID_HW_RING_ID;
3415
3416 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3417 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3418 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3419
3420 bnxt_init_rxbd_pages(ring, type);
3421 }
3422
3423 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3424}
3425
2247925f
SP
3426static void bnxt_init_cp_rings(struct bnxt *bp)
3427{
3e08b184 3428 int i, j;
2247925f
SP
3429
3430 for (i = 0; i < bp->cp_nr_rings; i++) {
3431 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3432 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3433
3434 ring->fw_ring_id = INVALID_HW_RING_ID;
6a8788f2
AG
3435 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3436 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3e08b184
MC
3437 for (j = 0; j < 2; j++) {
3438 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3439
3440 if (!cpr2)
3441 continue;
3442
3443 ring = &cpr2->cp_ring_struct;
3444 ring->fw_ring_id = INVALID_HW_RING_ID;
3445 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3446 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3447 }
2247925f
SP
3448 }
3449}
3450
c0c050c5
MC
3451static int bnxt_init_rx_rings(struct bnxt *bp)
3452{
3453 int i, rc = 0;
3454
c61fb99c 3455 if (BNXT_RX_PAGE_MODE(bp)) {
c6d30e83
MC
3456 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3457 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
c61fb99c
MC
3458 } else {
3459 bp->rx_offset = BNXT_RX_OFFSET;
3460 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3461 }
b3dba77c 3462
c0c050c5
MC
3463 for (i = 0; i < bp->rx_nr_rings; i++) {
3464 rc = bnxt_init_one_rx_ring(bp, i);
3465 if (rc)
3466 break;
3467 }
3468
3469 return rc;
3470}
3471
3472static int bnxt_init_tx_rings(struct bnxt *bp)
3473{
3474 u16 i;
3475
3476 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3477 MAX_SKB_FRAGS + 1);
3478
3479 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3480 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3481 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3482
3483 ring->fw_ring_id = INVALID_HW_RING_ID;
3484 }
3485
3486 return 0;
3487}
3488
3489static void bnxt_free_ring_grps(struct bnxt *bp)
3490{
3491 kfree(bp->grp_info);
3492 bp->grp_info = NULL;
3493}
3494
3495static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3496{
3497 int i;
3498
3499 if (irq_re_init) {
3500 bp->grp_info = kcalloc(bp->cp_nr_rings,
3501 sizeof(struct bnxt_ring_grp_info),
3502 GFP_KERNEL);
3503 if (!bp->grp_info)
3504 return -ENOMEM;
3505 }
3506 for (i = 0; i < bp->cp_nr_rings; i++) {
3507 if (irq_re_init)
3508 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3509 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3510 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3511 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3512 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3513 }
3514 return 0;
3515}
3516
3517static void bnxt_free_vnics(struct bnxt *bp)
3518{
3519 kfree(bp->vnic_info);
3520 bp->vnic_info = NULL;
3521 bp->nr_vnics = 0;
3522}
3523
3524static int bnxt_alloc_vnics(struct bnxt *bp)
3525{
3526 int num_vnics = 1;
3527
3528#ifdef CONFIG_RFS_ACCEL
9b3d15e6 3529 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
c0c050c5
MC
3530 num_vnics += bp->rx_nr_rings;
3531#endif
3532
dc52c6c7
PS
3533 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3534 num_vnics++;
3535
c0c050c5
MC
3536 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3537 GFP_KERNEL);
3538 if (!bp->vnic_info)
3539 return -ENOMEM;
3540
3541 bp->nr_vnics = num_vnics;
3542 return 0;
3543}
3544
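/* Reset every VNIC's firmware ids (VNIC, RSS/COS/LB contexts and L2 context)
 * to INVALID_HW_RING_ID and initialize the RSS hash keys: VNIC 0 gets a
 * random key, all other VNICs reuse VNIC 0's key.
 */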
3545static void bnxt_init_vnics(struct bnxt *bp)
3546{
3547 int i;
3548
3549 for (i = 0; i < bp->nr_vnics; i++) {
3550 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
44c6f72a 3551 int j;
c0c050c5
MC
3552
3553 vnic->fw_vnic_id = INVALID_HW_RING_ID;
44c6f72a
MC
3554 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3555 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3556
c0c050c5
MC
3557 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3558
3559 if (bp->vnic_info[i].rss_hash_key) {
3560 if (i == 0)
3561 prandom_bytes(vnic->rss_hash_key,
3562 HW_HASH_KEY_SIZE);
3563 else
3564 memcpy(vnic->rss_hash_key,
3565 bp->vnic_info[0].rss_hash_key,
3566 HW_HASH_KEY_SIZE);
3567 }
3568 }
3569}
3570
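/* Return the number of descriptor pages for a ring with @ring_size entries:
 * the smallest power of two that is strictly greater than
 * ring_size / desc_per_pg.
 */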
3571static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3572{
3573 int pages;
3574
3575 pages = ring_size / desc_per_pg;
3576
3577 if (!pages)
3578 return 1;
3579
3580 pages++;
3581
3582 while (pages & (pages - 1))
3583 pages++;
3584
3585 return pages;
3586}
3587
c6d30e83 3588void bnxt_set_tpa_flags(struct bnxt *bp)
c0c050c5
MC
3589{
3590 bp->flags &= ~BNXT_FLAG_TPA;
341138c3
MC
3591 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3592 return;
c0c050c5
MC
3593 if (bp->dev->features & NETIF_F_LRO)
3594 bp->flags |= BNXT_FLAG_LRO;
1054aee8 3595 else if (bp->dev->features & NETIF_F_GRO_HW)
c0c050c5
MC
3596 bp->flags |= BNXT_FLAG_GRO;
3597}
3598
3599/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3600 * be set on entry.
3601 */
3602void bnxt_set_ring_params(struct bnxt *bp)
3603{
27640ce6 3604 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
c0c050c5
MC
3605 u32 agg_factor = 0, agg_ring_size = 0;
3606
3607 /* 8 for CRC and VLAN */
3608 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3609
3610 rx_space = rx_size + NET_SKB_PAD +
3611 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3612
3613 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3614 ring_size = bp->rx_ring_size;
3615 bp->rx_agg_ring_size = 0;
3616 bp->rx_agg_nr_pages = 0;
3617
3618 if (bp->flags & BNXT_FLAG_TPA)
2839f28b 3619 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
c0c050c5
MC
3620
3621 bp->flags &= ~BNXT_FLAG_JUMBO;
bdbd1eb5 3622 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
c0c050c5
MC
3623 u32 jumbo_factor;
3624
3625 bp->flags |= BNXT_FLAG_JUMBO;
3626 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3627 if (jumbo_factor > agg_factor)
3628 agg_factor = jumbo_factor;
3629 }
3630 agg_ring_size = ring_size * agg_factor;
3631
3632 if (agg_ring_size) {
3633 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3634 RX_DESC_CNT);
3635 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3636 u32 tmp = agg_ring_size;
3637
3638 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3639 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3640 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3641 tmp, agg_ring_size);
3642 }
3643 bp->rx_agg_ring_size = agg_ring_size;
3644 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3645 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3646 rx_space = rx_size + NET_SKB_PAD +
3647 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3648 }
3649
3650 bp->rx_buf_use_size = rx_size;
3651 bp->rx_buf_size = rx_space;
3652
3653 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3654 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3655
3656 ring_size = bp->tx_ring_size;
3657 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3658 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3659
27640ce6
MC
3660 max_rx_cmpl = bp->rx_ring_size;
3661 /* MAX TPA needs to be added because TPA_START completions are
3662 * immediately recycled, so the TPA completions are not bound by
3663 * the RX ring size.
3664 */
3665 if (bp->flags & BNXT_FLAG_TPA)
3666 max_rx_cmpl += bp->max_tpa;
3667 /* RX and TPA completions are 32-byte, all others are 16-byte */
3668 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
c0c050c5
MC
3669 bp->cp_ring_size = ring_size;
3670
3671 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3672 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3673 bp->cp_nr_pages = MAX_CP_PAGES;
3674 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3675 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3676 ring_size, bp->cp_ring_size);
3677 }
3678 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3679 bp->cp_ring_mask = bp->cp_bit - 1;
3680}
3681
96a8604f
JDB
3682/* Changing allocation mode of RX rings.
3683 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3684 */
c61fb99c 3685int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
6bb19474 3686{
c61fb99c
MC
3687 if (page_mode) {
3688 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3689 return -EOPNOTSUPP;
7eb9bb3a
MC
3690 bp->dev->max_mtu =
3691 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
c61fb99c
MC
3692 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3693 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
c61fb99c
MC
3694 bp->rx_dir = DMA_BIDIRECTIONAL;
3695 bp->rx_skb_func = bnxt_rx_page_skb;
1054aee8
MC
3696 /* Disable LRO or GRO_HW */
3697 netdev_update_features(bp->dev);
c61fb99c 3698 } else {
7eb9bb3a 3699 bp->dev->max_mtu = bp->max_mtu;
c61fb99c
MC
3700 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3701 bp->rx_dir = DMA_FROM_DEVICE;
3702 bp->rx_skb_func = bnxt_rx_skb;
3703 }
6bb19474
MC
3704 return 0;
3705}
3706
c0c050c5
MC
3707static void bnxt_free_vnic_attributes(struct bnxt *bp)
3708{
3709 int i;
3710 struct bnxt_vnic_info *vnic;
3711 struct pci_dev *pdev = bp->pdev;
3712
3713 if (!bp->vnic_info)
3714 return;
3715
3716 for (i = 0; i < bp->nr_vnics; i++) {
3717 vnic = &bp->vnic_info[i];
3718
3719 kfree(vnic->fw_grp_ids);
3720 vnic->fw_grp_ids = NULL;
3721
3722 kfree(vnic->uc_list);
3723 vnic->uc_list = NULL;
3724
3725 if (vnic->mc_list) {
3726 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3727 vnic->mc_list, vnic->mc_list_mapping);
3728 vnic->mc_list = NULL;
3729 }
3730
3731 if (vnic->rss_table) {
34370d24 3732 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
c0c050c5
MC
3733 vnic->rss_table,
3734 vnic->rss_table_dma_addr);
3735 vnic->rss_table = NULL;
3736 }
3737
3738 vnic->rss_hash_key = NULL;
3739 vnic->flags = 0;
3740 }
3741}
3742
3743static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3744{
3745 int i, rc = 0, size;
3746 struct bnxt_vnic_info *vnic;
3747 struct pci_dev *pdev = bp->pdev;
3748 int max_rings;
3749
3750 for (i = 0; i < bp->nr_vnics; i++) {
3751 vnic = &bp->vnic_info[i];
3752
3753 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3754 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3755
3756 if (mem_size > 0) {
3757 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3758 if (!vnic->uc_list) {
3759 rc = -ENOMEM;
3760 goto out;
3761 }
3762 }
3763 }
3764
3765 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3766 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3767 vnic->mc_list =
3768 dma_alloc_coherent(&pdev->dev,
3769 vnic->mc_list_size,
3770 &vnic->mc_list_mapping,
3771 GFP_KERNEL);
3772 if (!vnic->mc_list) {
3773 rc = -ENOMEM;
3774 goto out;
3775 }
3776 }
3777
44c6f72a
MC
3778 if (bp->flags & BNXT_FLAG_CHIP_P5)
3779 goto vnic_skip_grps;
3780
c0c050c5
MC
3781 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3782 max_rings = bp->rx_nr_rings;
3783 else
3784 max_rings = 1;
3785
3786 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3787 if (!vnic->fw_grp_ids) {
3788 rc = -ENOMEM;
3789 goto out;
3790 }
44c6f72a 3791vnic_skip_grps:
ae10ae74
MC
3792 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3793 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3794 continue;
3795
c0c050c5 3796 /* Allocate rss table and hash key */
34370d24
MC
3797 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3798 if (bp->flags & BNXT_FLAG_CHIP_P5)
3799 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3800
3801 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3802 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3803 vnic->rss_table_size,
c0c050c5
MC
3804 &vnic->rss_table_dma_addr,
3805 GFP_KERNEL);
3806 if (!vnic->rss_table) {
3807 rc = -ENOMEM;
3808 goto out;
3809 }
3810
c0c050c5
MC
3811 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3812 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3813 }
3814 return 0;
3815
3816out:
3817 return rc;
3818}
3819
3820static void bnxt_free_hwrm_resources(struct bnxt *bp)
3821{
3822 struct pci_dev *pdev = bp->pdev;
3823
a2bf74f4
VD
3824 if (bp->hwrm_cmd_resp_addr) {
3825 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3826 bp->hwrm_cmd_resp_dma_addr);
3827 bp->hwrm_cmd_resp_addr = NULL;
3828 }
760b6d33
VD
3829
3830 if (bp->hwrm_cmd_kong_resp_addr) {
3831 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3832 bp->hwrm_cmd_kong_resp_addr,
3833 bp->hwrm_cmd_kong_resp_dma_addr);
3834 bp->hwrm_cmd_kong_resp_addr = NULL;
3835 }
3836}
3837
3838static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3839{
3840 struct pci_dev *pdev = bp->pdev;
3841
ba642ab7
MC
3842 if (bp->hwrm_cmd_kong_resp_addr)
3843 return 0;
3844
760b6d33
VD
3845 bp->hwrm_cmd_kong_resp_addr =
3846 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3847 &bp->hwrm_cmd_kong_resp_dma_addr,
3848 GFP_KERNEL);
3849 if (!bp->hwrm_cmd_kong_resp_addr)
3850 return -ENOMEM;
3851
3852 return 0;
c0c050c5
MC
3853}
3854
3855static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3856{
3857 struct pci_dev *pdev = bp->pdev;
3858
3859 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3860 &bp->hwrm_cmd_resp_dma_addr,
3861 GFP_KERNEL);
3862 if (!bp->hwrm_cmd_resp_addr)
3863 return -ENOMEM;
c0c050c5
MC
3864
3865 return 0;
3866}
3867
e605db80
DK
3868static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3869{
3870 if (bp->hwrm_short_cmd_req_addr) {
3871 struct pci_dev *pdev = bp->pdev;
3872
1dfddc41 3873 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
e605db80
DK
3874 bp->hwrm_short_cmd_req_addr,
3875 bp->hwrm_short_cmd_req_dma_addr);
3876 bp->hwrm_short_cmd_req_addr = NULL;
3877 }
3878}
3879
3880static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3881{
3882 struct pci_dev *pdev = bp->pdev;
3883
ba642ab7
MC
3884 if (bp->hwrm_short_cmd_req_addr)
3885 return 0;
3886
e605db80 3887 bp->hwrm_short_cmd_req_addr =
1dfddc41 3888 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
e605db80
DK
3889 &bp->hwrm_short_cmd_req_dma_addr,
3890 GFP_KERNEL);
3891 if (!bp->hwrm_short_cmd_req_addr)
3892 return -ENOMEM;
3893
3894 return 0;
3895}
3896
177a6cde 3897static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
c0c050c5 3898{
a37120b2
MC
3899 kfree(stats->hw_masks);
3900 stats->hw_masks = NULL;
3901 kfree(stats->sw_stats);
3902 stats->sw_stats = NULL;
177a6cde
MC
3903 if (stats->hw_stats) {
3904 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3905 stats->hw_stats_map);
3906 stats->hw_stats = NULL;
3907 }
3908}
c0c050c5 3909
a37120b2
MC
3910static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3911 bool alloc_masks)
177a6cde
MC
3912{
3913 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3914 &stats->hw_stats_map, GFP_KERNEL);
3915 if (!stats->hw_stats)
3916 return -ENOMEM;
00db3cba 3917
a37120b2
MC
3918 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
3919 if (!stats->sw_stats)
3920 goto stats_mem_err;
3921
3922 if (alloc_masks) {
3923 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
3924 if (!stats->hw_masks)
3925 goto stats_mem_err;
3926 }
177a6cde 3927 return 0;
a37120b2
MC
3928
3929stats_mem_err:
3930 bnxt_free_stats_mem(bp, stats);
3931 return -ENOMEM;
177a6cde 3932}
00db3cba 3933
d752d053
MC
3934static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
3935{
3936 int i;
3937
3938 for (i = 0; i < count; i++)
3939 mask_arr[i] = mask;
3940}
3941
3942static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
3943{
3944 int i;
3945
3946 for (i = 0; i < count; i++)
3947 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
3948}
3949
3950static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3951 struct bnxt_stats_mem *stats)
3952{
3953 struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3954 struct hwrm_func_qstats_ext_input req = {0};
3955 __le64 *hw_masks;
3956 int rc;
3957
3958 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3959 !(bp->flags & BNXT_FLAG_CHIP_P5))
3960 return -EOPNOTSUPP;
3961
3962 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
d2b42d01 3963 req.fid = cpu_to_le16(0xffff);
d752d053
MC
3964 req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3965 mutex_lock(&bp->hwrm_cmd_lock);
3966 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3967 if (rc)
3968 goto qstat_exit;
3969
3970 hw_masks = &resp->rx_ucast_pkts;
3971 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
3972
3973qstat_exit:
3974 mutex_unlock(&bp->hwrm_cmd_lock);
3975 return rc;
3976}
3977
531d1d26
MC
3978static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
3979static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
3980
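/* Query the firmware for the hardware counter width masks: ring statistics
 * via HWRM_FUNC_QSTATS_EXT, port and extended port statistics via the
 * *_QSTATS calls issued with the COUNTER_MASK flag.  When a query is not
 * supported, the masks fall back to fixed widths: 48 bits for P5 ring
 * counters, full 64 bits otherwise, and 40 bits for the port counters.
 */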
d752d053
MC
3981static void bnxt_init_stats(struct bnxt *bp)
3982{
3983 struct bnxt_napi *bnapi = bp->bnapi[0];
3984 struct bnxt_cp_ring_info *cpr;
3985 struct bnxt_stats_mem *stats;
531d1d26
MC
3986 __le64 *rx_stats, *tx_stats;
3987 int rc, rx_count, tx_count;
3988 u64 *rx_masks, *tx_masks;
d752d053 3989 u64 mask;
531d1d26 3990 u8 flags;
d752d053
MC
3991
3992 cpr = &bnapi->cp_ring;
3993 stats = &cpr->stats;
3994 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
3995 if (rc) {
3996 if (bp->flags & BNXT_FLAG_CHIP_P5)
3997 mask = (1ULL << 48) - 1;
3998 else
3999 mask = -1ULL;
4000 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4001 }
531d1d26
MC
4002 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4003 stats = &bp->port_stats;
4004 rx_stats = stats->hw_stats;
4005 rx_masks = stats->hw_masks;
4006 rx_count = sizeof(struct rx_port_stats) / 8;
4007 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4008 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4009 tx_count = sizeof(struct tx_port_stats) / 8;
4010
4011 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4012 rc = bnxt_hwrm_port_qstats(bp, flags);
4013 if (rc) {
4014 mask = (1ULL << 40) - 1;
4015
4016 bnxt_fill_masks(rx_masks, mask, rx_count);
4017 bnxt_fill_masks(tx_masks, mask, tx_count);
4018 } else {
4019 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4020 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4021 bnxt_hwrm_port_qstats(bp, 0);
4022 }
4023 }
4024 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4025 stats = &bp->rx_port_stats_ext;
4026 rx_stats = stats->hw_stats;
4027 rx_masks = stats->hw_masks;
4028 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4029 stats = &bp->tx_port_stats_ext;
4030 tx_stats = stats->hw_stats;
4031 tx_masks = stats->hw_masks;
4032 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4033
c07fa08f 4034 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
531d1d26
MC
4035 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4036 if (rc) {
4037 mask = (1ULL << 40) - 1;
4038
4039 bnxt_fill_masks(rx_masks, mask, rx_count);
4040 if (tx_stats)
4041 bnxt_fill_masks(tx_masks, mask, tx_count);
4042 } else {
4043 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4044 if (tx_stats)
4045 bnxt_copy_hw_masks(tx_masks, tx_stats,
4046 tx_count);
4047 bnxt_hwrm_port_qstats_ext(bp, 0);
4048 }
4049 }
d752d053
MC
4050}
4051
177a6cde
MC
4052static void bnxt_free_port_stats(struct bnxt *bp)
4053{
4054 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4055 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
36e53349 4056
177a6cde
MC
4057 bnxt_free_stats_mem(bp, &bp->port_stats);
4058 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4059 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
fd3ab1c7
MC
4060}
4061
4062static void bnxt_free_ring_stats(struct bnxt *bp)
4063{
177a6cde 4064 int i;
3bdf56c4 4065
c0c050c5
MC
4066 if (!bp->bnapi)
4067 return;
4068
c0c050c5
MC
4069 for (i = 0; i < bp->cp_nr_rings; i++) {
4070 struct bnxt_napi *bnapi = bp->bnapi[i];
4071 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4072
177a6cde 4073 bnxt_free_stats_mem(bp, &cpr->stats);
c0c050c5
MC
4074 }
4075}
4076
4077static int bnxt_alloc_stats(struct bnxt *bp)
4078{
4079 u32 size, i;
177a6cde 4080 int rc;
c0c050c5 4081
4e748506 4082 size = bp->hw_ring_stats_size;
c0c050c5
MC
4083
4084 for (i = 0; i < bp->cp_nr_rings; i++) {
4085 struct bnxt_napi *bnapi = bp->bnapi[i];
4086 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4087
177a6cde 4088 cpr->stats.len = size;
a37120b2 4089 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
177a6cde
MC
4090 if (rc)
4091 return rc;
c0c050c5
MC
4092
4093 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4094 }
3bdf56c4 4095
a220eabc
VV
4096 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4097 return 0;
fd3ab1c7 4098
177a6cde 4099 if (bp->port_stats.hw_stats)
a220eabc 4100 goto alloc_ext_stats;
3bdf56c4 4101
177a6cde 4102 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
a37120b2 4103 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
177a6cde
MC
4104 if (rc)
4105 return rc;
3bdf56c4 4106
a220eabc 4107 bp->flags |= BNXT_FLAG_PORT_STATS;
00db3cba 4108
fd3ab1c7 4109alloc_ext_stats:
a220eabc
VV
4110 /* Display extended statistics only if FW supports it */
4111 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
6154532f 4112 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
00db3cba
VV
4113 return 0;
4114
177a6cde 4115 if (bp->rx_port_stats_ext.hw_stats)
a220eabc 4116 goto alloc_tx_ext_stats;
fd3ab1c7 4117
177a6cde 4118 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
a37120b2 4119 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
177a6cde
MC
4120 /* Extended stats are optional */
4121 if (rc)
a220eabc 4122 return 0;
00db3cba 4123
fd3ab1c7 4124alloc_tx_ext_stats:
177a6cde 4125 if (bp->tx_port_stats_ext.hw_stats)
dfe64de9 4126 return 0;
fd3ab1c7 4127
6154532f
VV
4128 if (bp->hwrm_spec_code >= 0x10902 ||
4129 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
177a6cde 4130 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
a37120b2 4131 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
177a6cde
MC
4132 /* Extended stats are optional */
4133 if (rc)
4134 return 0;
3bdf56c4 4135 }
a220eabc 4136 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
c0c050c5
MC
4137 return 0;
4138}
4139
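/* Reset the software producer/consumer indices of every completion, TX and
 * RX ring.  Used when the rings are reused without reallocating their memory
 * (the irq_re_init == false path in bnxt_free_mem()).
 */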
4140static void bnxt_clear_ring_indices(struct bnxt *bp)
4141{
4142 int i;
4143
4144 if (!bp->bnapi)
4145 return;
4146
4147 for (i = 0; i < bp->cp_nr_rings; i++) {
4148 struct bnxt_napi *bnapi = bp->bnapi[i];
4149 struct bnxt_cp_ring_info *cpr;
4150 struct bnxt_rx_ring_info *rxr;
4151 struct bnxt_tx_ring_info *txr;
4152
4153 if (!bnapi)
4154 continue;
4155
4156 cpr = &bnapi->cp_ring;
4157 cpr->cp_raw_cons = 0;
4158
b6ab4b01 4159 txr = bnapi->tx_ring;
3b2b7d9d
MC
4160 if (txr) {
4161 txr->tx_prod = 0;
4162 txr->tx_cons = 0;
4163 }
c0c050c5 4164
b6ab4b01 4165 rxr = bnapi->rx_ring;
3b2b7d9d
MC
4166 if (rxr) {
4167 rxr->rx_prod = 0;
4168 rxr->rx_agg_prod = 0;
4169 rxr->rx_sw_agg_prod = 0;
376a5b86 4170 rxr->rx_next_cons = 0;
3b2b7d9d 4171 }
c0c050c5
MC
4172 }
4173}
4174
4175static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4176{
4177#ifdef CONFIG_RFS_ACCEL
4178 int i;
4179
 4180 /* We are under rtnl_lock and all our NAPIs have been disabled. It's
4181 * safe to delete the hash table.
4182 */
4183 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4184 struct hlist_head *head;
4185 struct hlist_node *tmp;
4186 struct bnxt_ntuple_filter *fltr;
4187
4188 head = &bp->ntp_fltr_hash_tbl[i];
4189 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4190 hlist_del(&fltr->hash);
4191 kfree(fltr);
4192 }
4193 }
4194 if (irq_reinit) {
4195 kfree(bp->ntp_fltr_bmap);
4196 bp->ntp_fltr_bmap = NULL;
4197 }
4198 bp->ntp_fltr_count = 0;
4199#endif
4200}
4201
4202static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4203{
4204#ifdef CONFIG_RFS_ACCEL
4205 int i, rc = 0;
4206
4207 if (!(bp->flags & BNXT_FLAG_RFS))
4208 return 0;
4209
4210 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4211 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4212
4213 bp->ntp_fltr_count = 0;
ac45bd93
DC
4214 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4215 sizeof(long),
c0c050c5
MC
4216 GFP_KERNEL);
4217
4218 if (!bp->ntp_fltr_bmap)
4219 rc = -ENOMEM;
4220
4221 return rc;
4222#else
4223 return 0;
4224#endif
4225}
4226
4227static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4228{
4229 bnxt_free_vnic_attributes(bp);
4230 bnxt_free_tx_rings(bp);
4231 bnxt_free_rx_rings(bp);
4232 bnxt_free_cp_rings(bp);
4233 bnxt_free_ntp_fltrs(bp, irq_re_init);
4234 if (irq_re_init) {
fd3ab1c7 4235 bnxt_free_ring_stats(bp);
b0d28207 4236 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
eba93de6 4237 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
fea6b333 4238 bnxt_free_port_stats(bp);
c0c050c5
MC
4239 bnxt_free_ring_grps(bp);
4240 bnxt_free_vnics(bp);
a960dec9
MC
4241 kfree(bp->tx_ring_map);
4242 bp->tx_ring_map = NULL;
b6ab4b01
MC
4243 kfree(bp->tx_ring);
4244 bp->tx_ring = NULL;
4245 kfree(bp->rx_ring);
4246 bp->rx_ring = NULL;
c0c050c5
MC
4247 kfree(bp->bnapi);
4248 bp->bnapi = NULL;
4249 } else {
4250 bnxt_clear_ring_indices(bp);
4251 }
4252}
4253
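/* Allocate all driver memory for the rings.  When @irq_re_init is true this
 * includes the bnxt_napi array, the RX/TX ring info arrays, statistics, the
 * ntuple filter bitmap and the VNIC array; ring descriptor memory, completion
 * rings and VNIC attributes are always (re)allocated.  On any failure
 * everything is unwound through bnxt_free_mem().
 */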
4254static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4255{
01657bcd 4256 int i, j, rc, size, arr_size;
c0c050c5
MC
4257 void *bnapi;
4258
4259 if (irq_re_init) {
4260 /* Allocate bnapi mem pointer array and mem block for
4261 * all queues
4262 */
4263 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4264 bp->cp_nr_rings);
4265 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4266 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4267 if (!bnapi)
4268 return -ENOMEM;
4269
4270 bp->bnapi = bnapi;
4271 bnapi += arr_size;
4272 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4273 bp->bnapi[i] = bnapi;
4274 bp->bnapi[i]->index = i;
4275 bp->bnapi[i]->bp = bp;
e38287b7
MC
4276 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4277 struct bnxt_cp_ring_info *cpr =
4278 &bp->bnapi[i]->cp_ring;
4279
4280 cpr->cp_ring_struct.ring_mem.flags =
4281 BNXT_RMEM_RING_PTE_FLAG;
4282 }
c0c050c5
MC
4283 }
4284
b6ab4b01
MC
4285 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4286 sizeof(struct bnxt_rx_ring_info),
4287 GFP_KERNEL);
4288 if (!bp->rx_ring)
4289 return -ENOMEM;
4290
4291 for (i = 0; i < bp->rx_nr_rings; i++) {
e38287b7
MC
4292 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4293
4294 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4295 rxr->rx_ring_struct.ring_mem.flags =
4296 BNXT_RMEM_RING_PTE_FLAG;
4297 rxr->rx_agg_ring_struct.ring_mem.flags =
4298 BNXT_RMEM_RING_PTE_FLAG;
4299 }
4300 rxr->bnapi = bp->bnapi[i];
b6ab4b01
MC
4301 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4302 }
4303
4304 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4305 sizeof(struct bnxt_tx_ring_info),
4306 GFP_KERNEL);
4307 if (!bp->tx_ring)
4308 return -ENOMEM;
4309
a960dec9
MC
4310 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4311 GFP_KERNEL);
4312
4313 if (!bp->tx_ring_map)
4314 return -ENOMEM;
4315
01657bcd
MC
4316 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4317 j = 0;
4318 else
4319 j = bp->rx_nr_rings;
4320
4321 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
e38287b7
MC
4322 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4323
4324 if (bp->flags & BNXT_FLAG_CHIP_P5)
4325 txr->tx_ring_struct.ring_mem.flags =
4326 BNXT_RMEM_RING_PTE_FLAG;
4327 txr->bnapi = bp->bnapi[j];
4328 bp->bnapi[j]->tx_ring = txr;
5f449249 4329 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
38413406 4330 if (i >= bp->tx_nr_rings_xdp) {
e38287b7 4331 txr->txq_index = i - bp->tx_nr_rings_xdp;
38413406
MC
4332 bp->bnapi[j]->tx_int = bnxt_tx_int;
4333 } else {
fa3e93e8 4334 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
38413406
MC
4335 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4336 }
b6ab4b01
MC
4337 }
4338
c0c050c5
MC
4339 rc = bnxt_alloc_stats(bp);
4340 if (rc)
4341 goto alloc_mem_err;
d752d053 4342 bnxt_init_stats(bp);
c0c050c5
MC
4343
4344 rc = bnxt_alloc_ntp_fltrs(bp);
4345 if (rc)
4346 goto alloc_mem_err;
4347
4348 rc = bnxt_alloc_vnics(bp);
4349 if (rc)
4350 goto alloc_mem_err;
4351 }
4352
4353 bnxt_init_ring_struct(bp);
4354
4355 rc = bnxt_alloc_rx_rings(bp);
4356 if (rc)
4357 goto alloc_mem_err;
4358
4359 rc = bnxt_alloc_tx_rings(bp);
4360 if (rc)
4361 goto alloc_mem_err;
4362
4363 rc = bnxt_alloc_cp_rings(bp);
4364 if (rc)
4365 goto alloc_mem_err;
4366
4367 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4368 BNXT_VNIC_UCAST_FLAG;
4369 rc = bnxt_alloc_vnic_attributes(bp);
4370 if (rc)
4371 goto alloc_mem_err;
4372 return 0;
4373
4374alloc_mem_err:
4375 bnxt_free_mem(bp, true);
4376 return rc;
4377}
4378
9d8bc097
MC
4379static void bnxt_disable_int(struct bnxt *bp)
4380{
4381 int i;
4382
4383 if (!bp->bnapi)
4384 return;
4385
4386 for (i = 0; i < bp->cp_nr_rings; i++) {
4387 struct bnxt_napi *bnapi = bp->bnapi[i];
4388 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
daf1f1e7 4389 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9d8bc097 4390
daf1f1e7 4391 if (ring->fw_ring_id != INVALID_HW_RING_ID)
697197e5 4392 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4393 }
4394}
4395
e5811b8c
MC
4396static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4397{
4398 struct bnxt_napi *bnapi = bp->bnapi[n];
4399 struct bnxt_cp_ring_info *cpr;
4400
4401 cpr = &bnapi->cp_ring;
4402 return cpr->cp_ring_struct.map_idx;
4403}
4404
9d8bc097
MC
4405static void bnxt_disable_int_sync(struct bnxt *bp)
4406{
4407 int i;
4408
38290e37
MC
4409 if (!bp->irq_tbl)
4410 return;
4411
9d8bc097
MC
4412 atomic_inc(&bp->intr_sem);
4413
4414 bnxt_disable_int(bp);
e5811b8c
MC
4415 for (i = 0; i < bp->cp_nr_rings; i++) {
4416 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4417
4418 synchronize_irq(bp->irq_tbl[map_idx].vector);
4419 }
9d8bc097
MC
4420}
4421
4422static void bnxt_enable_int(struct bnxt *bp)
4423{
4424 int i;
4425
4426 atomic_set(&bp->intr_sem, 0);
4427 for (i = 0; i < bp->cp_nr_rings; i++) {
4428 struct bnxt_napi *bnapi = bp->bnapi[i];
4429 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4430
697197e5 4431 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4432 }
4433}
4434
c0c050c5
MC
4435void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4436 u16 cmpl_ring, u16 target_id)
4437{
a8643e16 4438 struct input *req = request;
c0c050c5 4439
a8643e16
MC
4440 req->req_type = cpu_to_le16(req_type);
4441 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4442 req->target_id = cpu_to_le16(target_id);
760b6d33
VD
4443 if (bnxt_kong_hwrm_message(bp, req))
4444 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4445 else
4446 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
c0c050c5
MC
4447}
4448
d4f1420d
MC
4449static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4450{
4451 switch (hwrm_err) {
4452 case HWRM_ERR_CODE_SUCCESS:
4453 return 0;
cf223bfa
VV
4454 case HWRM_ERR_CODE_RESOURCE_LOCKED:
4455 return -EROFS;
d4f1420d
MC
4456 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4457 return -EACCES;
4458 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4459 return -ENOSPC;
4460 case HWRM_ERR_CODE_INVALID_PARAMS:
4461 case HWRM_ERR_CODE_INVALID_FLAGS:
4462 case HWRM_ERR_CODE_INVALID_ENABLES:
4463 case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4464 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4465 return -EINVAL;
4466 case HWRM_ERR_CODE_NO_BUFFER:
4467 return -ENOMEM;
4468 case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
3a707bed 4469 case HWRM_ERR_CODE_BUSY:
d4f1420d
MC
4470 return -EAGAIN;
4471 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4472 return -EOPNOTSUPP;
4473 default:
4474 return -EIO;
4475 }
4476}
4477
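/* Send one HWRM command to the firmware.  The request is written to the
 * ChiMP (or KONG) communication window in BAR0, using the short command
 * format (a short_input header pointing to a DMA buffer) for long requests
 * or when the firmware requires it, and the doorbell is rung.  Completion is
 * detected either through the completion ring interrupt or by polling the
 * response length and the trailing valid byte in the DMA response buffer,
 * starting with a short polling interval.  Callers serialize through
 * hwrm_cmd_lock.
 */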
fbfbc485
MC
4478static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4479 int timeout, bool silent)
c0c050c5 4480{
a11fa2be 4481 int i, intr_process, rc, tmo_count;
a8643e16 4482 struct input *req = msg;
c0c050c5 4483 u32 *data = msg;
845adfe4 4484 u8 *valid;
c0c050c5
MC
4485 u16 cp_ring_id, len = 0;
4486 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
e605db80 4487 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
ebd5818c 4488 struct hwrm_short_input short_input = {0};
2e9ee398
VD
4489 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4490 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
760b6d33 4491 u16 dst = BNXT_HWRM_CHNL_CHIMP;
c0c050c5 4492
825741b0
VV
4493 if (BNXT_NO_FW_ACCESS(bp) &&
4494 le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
b4fff207
MC
4495 return -EBUSY;
4496
1dfddc41
MC
4497 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4498 if (msg_len > bp->hwrm_max_ext_req_len ||
4499 !bp->hwrm_short_cmd_req_addr)
4500 return -EINVAL;
4501 }
4502
760b6d33
VD
4503 if (bnxt_hwrm_kong_chnl(bp, req)) {
4504 dst = BNXT_HWRM_CHNL_KONG;
4505 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4506 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4507 resp = bp->hwrm_cmd_kong_resp_addr;
760b6d33
VD
4508 }
4509
4510 memset(resp, 0, PAGE_SIZE);
4511 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4512 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4513
4514 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4515 /* currently supports only one outstanding message */
4516 if (intr_process)
4517 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4518
1dfddc41
MC
4519 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4520 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
e605db80 4521 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
1dfddc41
MC
4522 u16 max_msg_len;
4523
4524 /* Set boundary for maximum extended request length for short
4525 * cmd format. If passed up from device use the max supported
4526 * internal req length.
4527 */
4528 max_msg_len = bp->hwrm_max_ext_req_len;
e605db80
DK
4529
4530 memcpy(short_cmd_req, req, msg_len);
1dfddc41
MC
4531 if (msg_len < max_msg_len)
4532 memset(short_cmd_req + msg_len, 0,
4533 max_msg_len - msg_len);
e605db80
DK
4534
4535 short_input.req_type = req->req_type;
4536 short_input.signature =
4537 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4538 short_input.size = cpu_to_le16(msg_len);
4539 short_input.req_addr =
4540 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4541
4542 data = (u32 *)&short_input;
4543 msg_len = sizeof(short_input);
4544
4545 /* Sync memory write before updating doorbell */
4546 wmb();
4547
4548 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4549 }
4550
c0c050c5 4551 /* Write request msg to hwrm channel */
2e9ee398 4552 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
c0c050c5 4553
e605db80 4554 for (i = msg_len; i < max_req_len; i += 4)
2e9ee398 4555 writel(0, bp->bar0 + bar_offset + i);
d79979a1 4556
c0c050c5 4557 /* Ring channel doorbell */
2e9ee398 4558 writel(1, bp->bar0 + doorbell_offset);
c0c050c5 4559
5bedb529 4560 if (!pci_is_enabled(bp->pdev))
a2f3835c 4561 return -ENODEV;
5bedb529 4562
ff4fe81d
MC
4563 if (!timeout)
4564 timeout = DFLT_HWRM_CMD_TIMEOUT;
881d8353
VV
4565 /* Limit timeout to an upper limit */
4566 timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT);
9751e8e7
AG
4567 /* convert timeout to usec */
4568 timeout *= 1000;
ff4fe81d 4569
c0c050c5 4570 i = 0;
9751e8e7
AG
4571 /* Short timeout for the first few iterations:
4572 * number of loops = number of loops for short timeout +
4573 * number of loops for standard timeout.
4574 */
4575 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4576 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4577 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
89455017 4578
c0c050c5 4579 if (intr_process) {
fc718bb2
VD
4580 u16 seq_id = bp->hwrm_intr_seq_id;
4581
c0c050c5 4582 /* Wait until hwrm response cmpl interrupt is processed */
fc718bb2 4583 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
a11fa2be 4584 i++ < tmo_count) {
642aebde
PC
4585 /* Abort the wait for completion if the FW health
4586 * check has failed.
4587 */
4588 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4589 return -EBUSY;
9751e8e7 4590 /* on first few passes, just barely sleep */
80a9641f 4591 if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
9751e8e7
AG
4592 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4593 HWRM_SHORT_MAX_TIMEOUT);
80a9641f
PC
4594 } else {
4595 if (HWRM_WAIT_MUST_ABORT(bp, req))
4596 break;
9751e8e7
AG
4597 usleep_range(HWRM_MIN_TIMEOUT,
4598 HWRM_MAX_TIMEOUT);
80a9641f 4599 }
c0c050c5
MC
4600 }
4601
fc718bb2 4602 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
5bedb529
MC
4603 if (!silent)
4604 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4605 le16_to_cpu(req->req_type));
a935cb7e 4606 return -EBUSY;
c0c050c5 4607 }
2a5a8800
EP
4608 len = le16_to_cpu(resp->resp_len);
4609 valid = ((u8 *)resp) + len - 1;
c0c050c5 4610 } else {
cc559c1a
MC
4611 int j;
4612
c0c050c5 4613 /* Check if response len is updated */
a11fa2be 4614 for (i = 0; i < tmo_count; i++) {
642aebde
PC
4615 /* Abort the wait for completion if the FW health
4616 * check has failed.
4617 */
4618 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4619 return -EBUSY;
2a5a8800 4620 len = le16_to_cpu(resp->resp_len);
c0c050c5
MC
4621 if (len)
4622 break;
9751e8e7 4623 /* on first few passes, just barely sleep */
80a9641f 4624 if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
9751e8e7
AG
4625 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4626 HWRM_SHORT_MAX_TIMEOUT);
80a9641f
PC
4627 } else {
4628 if (HWRM_WAIT_MUST_ABORT(bp, req))
4629 goto timeout_abort;
9751e8e7
AG
4630 usleep_range(HWRM_MIN_TIMEOUT,
4631 HWRM_MAX_TIMEOUT);
80a9641f 4632 }
c0c050c5
MC
4633 }
4634
a11fa2be 4635 if (i >= tmo_count) {
80a9641f 4636timeout_abort:
5bedb529
MC
4637 if (!silent)
4638 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4639 HWRM_TOTAL_TIMEOUT(i),
4640 le16_to_cpu(req->req_type),
4641 le16_to_cpu(req->seq_id), len);
a935cb7e 4642 return -EBUSY;
c0c050c5
MC
4643 }
4644
845adfe4 4645 /* Last byte of resp contains valid bit */
2a5a8800 4646 valid = ((u8 *)resp) + len - 1;
cc559c1a 4647 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
845adfe4
MC
4648 /* make sure we read from updated DMA memory */
4649 dma_rmb();
4650 if (*valid)
c0c050c5 4651 break;
0000b81a 4652 usleep_range(1, 5);
c0c050c5
MC
4653 }
4654
cc559c1a 4655 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
5bedb529
MC
4656 if (!silent)
4657 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4658 HWRM_TOTAL_TIMEOUT(i),
4659 le16_to_cpu(req->req_type),
4660 le16_to_cpu(req->seq_id), len,
4661 *valid);
a935cb7e 4662 return -EBUSY;
c0c050c5
MC
4663 }
4664 }
4665
845adfe4
MC
4666 /* Zero valid bit for compatibility. Valid bit in an older spec
4667 * may become a new field in a newer spec. We must make sure that
4668 * a new field not implemented by old spec will read zero.
4669 */
4670 *valid = 0;
c0c050c5 4671 rc = le16_to_cpu(resp->error_code);
fbfbc485 4672 if (rc && !silent)
c0c050c5
MC
4673 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4674 le16_to_cpu(resp->req_type),
4675 le16_to_cpu(resp->seq_id), rc);
d4f1420d 4676 return bnxt_hwrm_to_stderr(rc);
fbfbc485
MC
4677}
4678
4679int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4680{
4681 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
c0c050c5
MC
4682}
4683
cc72f3b1
MC
4684int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4685 int timeout)
4686{
4687 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4688}
4689
c0c050c5
MC
4690int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4691{
4692 int rc;
4693
4694 mutex_lock(&bp->hwrm_cmd_lock);
4695 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4696 mutex_unlock(&bp->hwrm_cmd_lock);
4697 return rc;
4698}
4699
90e20921
MC
4700int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4701 int timeout)
4702{
4703 int rc;
4704
4705 mutex_lock(&bp->hwrm_cmd_lock);
4706 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4707 mutex_unlock(&bp->hwrm_cmd_lock);
4708 return rc;
4709}
4710
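/* Register the driver with the firmware: advertise the OS type and driver
 * version, the VF commands to be forwarded to the PF (bnxt_vf_req_snif), and
 * the async event ids (from bnxt_async_events_arr plus the optional @bmap)
 * that the firmware should forward to the driver.  On success,
 * BNXT_STATE_DRV_REGISTERED is set and IF_CHANGE support is recorded in
 * fw_cap.
 */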
2e882468
VV
4711int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4712 bool async_only)
c0c050c5 4713{
2e882468 4714 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
c0c050c5 4715 struct hwrm_func_drv_rgtr_input req = {0};
25be8623
MC
4716 DECLARE_BITMAP(async_events_bmap, 256);
4717 u32 *events = (u32 *)async_events_bmap;
acfb50e4 4718 u32 flags;
2e882468 4719 int rc, i;
a1653b13
MC
4720
4721 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4722
4723 req.enables =
4724 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
2e882468
VV
4725 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4726 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
a1653b13 4727
11f15ed3 4728 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
8280b38e
VV
4729 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4730 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4731 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
acfb50e4 4732 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
e633a329
VV
4733 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4734 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
acfb50e4 4735 req.flags = cpu_to_le32(flags);
d4f52de0
MC
4736 req.ver_maj_8b = DRV_VER_MAJ;
4737 req.ver_min_8b = DRV_VER_MIN;
4738 req.ver_upd_8b = DRV_VER_UPD;
4739 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4740 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4741 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
c0c050c5
MC
4742
4743 if (BNXT_PF(bp)) {
9b0436c3 4744 u32 data[8];
a1653b13 4745 int i;
c0c050c5 4746
9b0436c3
MC
4747 memset(data, 0, sizeof(data));
4748 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4749 u16 cmd = bnxt_vf_req_snif[i];
4750 unsigned int bit, idx;
4751
4752 idx = cmd / 32;
4753 bit = cmd % 32;
4754 data[idx] |= 1 << bit;
4755 }
c0c050c5 4756
de68f5de
MC
4757 for (i = 0; i < 8; i++)
4758 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4759
c0c050c5
MC
4760 req.enables |=
4761 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4762 }
4763
abd43a13
VD
4764 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4765 req.flags |= cpu_to_le32(
4766 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4767
2e882468
VV
4768 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4769 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4770 u16 event_id = bnxt_async_events_arr[i];
4771
4772 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4773 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4774 continue;
4775 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4776 }
4777 if (bmap && bmap_size) {
4778 for (i = 0; i < bmap_size; i++) {
4779 if (test_bit(i, bmap))
4780 __set_bit(i, async_events_bmap);
4781 }
4782 }
4783 for (i = 0; i < 8; i++)
4784 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4785
4786 if (async_only)
4787 req.enables =
4788 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4789
25e1acd6
MC
4790 mutex_lock(&bp->hwrm_cmd_lock);
4791 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
bdb38602
VV
4792 if (!rc) {
4793 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4794 if (resp->flags &
4795 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4796 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4797 }
25e1acd6
MC
4798 mutex_unlock(&bp->hwrm_cmd_lock);
4799 return rc;
c0c050c5
MC
4800}
4801
be58a0da
JH
4802static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4803{
4804 struct hwrm_func_drv_unrgtr_input req = {0};
4805
bdb38602
VV
4806 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4807 return 0;
4808
be58a0da
JH
4809 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4810 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4811}
4812
c0c050c5
MC
4813static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4814{
4815 u32 rc = 0;
4816 struct hwrm_tunnel_dst_port_free_input req = {0};
4817
4818 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4819 req.tunnel_type = tunnel_type;
4820
4821 switch (tunnel_type) {
4822 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
442a35a5
JK
4823 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4824 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
c0c050c5
MC
4825 break;
4826 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
442a35a5
JK
4827 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4828 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
c0c050c5
MC
4829 break;
4830 default:
4831 break;
4832 }
4833
4834 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4835 if (rc)
4836 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4837 rc);
4838 return rc;
4839}
4840
4841static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4842 u8 tunnel_type)
4843{
4844 u32 rc = 0;
4845 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4846 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4847
4848 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4849
4850 req.tunnel_type = tunnel_type;
4851 req.tunnel_dst_port_val = port;
4852
4853 mutex_lock(&bp->hwrm_cmd_lock);
4854 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4855 if (rc) {
4856 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4857 rc);
4858 goto err_out;
4859 }
4860
57aac71b
CJ
4861 switch (tunnel_type) {
4862 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
442a35a5
JK
4863 bp->vxlan_fw_dst_port_id =
4864 le16_to_cpu(resp->tunnel_dst_port_id);
57aac71b
CJ
4865 break;
4866 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
442a35a5 4867 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
57aac71b
CJ
4868 break;
4869 default:
4870 break;
4871 }
4872
c0c050c5
MC
4873err_out:
4874 mutex_unlock(&bp->hwrm_cmd_lock);
4875 return rc;
4876}
4877
4878static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4879{
4880 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4881 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4882
4883 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
c193554e 4884 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
c0c050c5
MC
4885
4886 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4887 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4888 req.mask = cpu_to_le32(vnic->rx_mask);
4889 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4890}
4891
4892#ifdef CONFIG_RFS_ACCEL
4893static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4894 struct bnxt_ntuple_filter *fltr)
4895{
4896 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4897
4898 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4899 req.ntuple_filter_id = fltr->filter_id;
4900 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4901}
4902
4903#define BNXT_NTP_FLTR_FLAGS \
4904 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4905 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4906 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4907 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4908 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4909 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4910 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4911 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4912 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4913 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4914 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4915 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4916 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
c193554e 4917 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
c0c050c5 4918
61aad724
MC
4919#define BNXT_NTP_TUNNEL_FLTR_FLAG \
4920 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4921
c0c050c5
MC
4922static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4923 struct bnxt_ntuple_filter *fltr)
4924{
c0c050c5 4925 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
5c209fc8 4926 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
c0c050c5 4927 struct flow_keys *keys = &fltr->fkeys;
ac33906c 4928 struct bnxt_vnic_info *vnic;
41136ab3 4929 u32 flags = 0;
5c209fc8 4930 int rc = 0;
c0c050c5
MC
4931
4932 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
a54c4d74 4933 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
c0c050c5 4934
41136ab3
MC
4935 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4936 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4937 req.dst_id = cpu_to_le16(fltr->rxq);
ac33906c
MC
4938 } else {
4939 vnic = &bp->vnic_info[fltr->rxq + 1];
41136ab3 4940 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
ac33906c 4941 }
41136ab3
MC
4942 req.flags = cpu_to_le32(flags);
4943 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
c0c050c5
MC
4944
4945 req.ethertype = htons(ETH_P_IP);
4946 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
c193554e 4947 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
c0c050c5
MC
4948 req.ip_protocol = keys->basic.ip_proto;
4949
dda0e746
MC
4950 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4951 int i;
4952
4953 req.ethertype = htons(ETH_P_IPV6);
4954 req.ip_addr_type =
4955 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4956 *(struct in6_addr *)&req.src_ipaddr[0] =
4957 keys->addrs.v6addrs.src;
4958 *(struct in6_addr *)&req.dst_ipaddr[0] =
4959 keys->addrs.v6addrs.dst;
4960 for (i = 0; i < 4; i++) {
4961 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4962 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4963 }
4964 } else {
4965 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4966 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4967 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4968 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4969 }
61aad724
MC
4970 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4971 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4972 req.tunnel_type =
4973 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4974 }
c0c050c5
MC
4975
4976 req.src_port = keys->ports.src;
4977 req.src_port_mask = cpu_to_be16(0xffff);
4978 req.dst_port = keys->ports.dst;
4979 req.dst_port_mask = cpu_to_be16(0xffff);
4980
c0c050c5
MC
4981 mutex_lock(&bp->hwrm_cmd_lock);
4982 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5c209fc8
VD
4983 if (!rc) {
4984 resp = bnxt_get_hwrm_resp_addr(bp, &req);
c0c050c5 4985 fltr->filter_id = resp->ntuple_filter_id;
5c209fc8 4986 }
c0c050c5
MC
4987 mutex_unlock(&bp->hwrm_cmd_lock);
4988 return rc;
4989}
4990#endif
4991
4992static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4993 u8 *mac_addr)
4994{
4995 u32 rc = 0;
4996 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4997 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4998
4999 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
dc52c6c7
PS
5000 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5001 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5002 req.flags |=
5003 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
c193554e 5004 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
c0c050c5
MC
5005 req.enables =
5006 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
c193554e 5007 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
c0c050c5
MC
5008 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5009 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
5010 req.l2_addr_mask[0] = 0xff;
5011 req.l2_addr_mask[1] = 0xff;
5012 req.l2_addr_mask[2] = 0xff;
5013 req.l2_addr_mask[3] = 0xff;
5014 req.l2_addr_mask[4] = 0xff;
5015 req.l2_addr_mask[5] = 0xff;
5016
5017 mutex_lock(&bp->hwrm_cmd_lock);
5018 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5019 if (!rc)
5020 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5021 resp->l2_filter_id;
5022 mutex_unlock(&bp->hwrm_cmd_lock);
5023 return rc;
5024}
5025
5026static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5027{
5028 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5029 int rc = 0;
5030
5031 /* Any associated ntuple filters will also be cleared by firmware. */
5032 mutex_lock(&bp->hwrm_cmd_lock);
5033 for (i = 0; i < num_of_vnics; i++) {
5034 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5035
5036 for (j = 0; j < vnic->uc_filter_count; j++) {
5037 struct hwrm_cfa_l2_filter_free_input req = {0};
5038
5039 bnxt_hwrm_cmd_hdr_init(bp, &req,
5040 HWRM_CFA_L2_FILTER_FREE, -1, -1);
5041
5042 req.l2_filter_id = vnic->fw_l2_filter_id[j];
5043
5044 rc = _hwrm_send_message(bp, &req, sizeof(req),
5045 HWRM_CMD_TIMEOUT);
5046 }
5047 vnic->uc_filter_count = 0;
5048 }
5049 mutex_unlock(&bp->hwrm_cmd_lock);
5050
5051 return rc;
5052}
5053
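/* Configure hardware GRO/LRO (TPA) on a VNIC.  When @tpa_flags is non-zero,
 * the aggregation limits are derived from the MTU and BNXT_RX_PAGE_SIZE
 * (max_agg_segs is given in log2 units); a zero @tpa_flags disables TPA.
 */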
5054static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5055{
5056 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
79632e9b 5057 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
c0c050c5
MC
5058 struct hwrm_vnic_tpa_cfg_input req = {0};
5059
3c4fe80b
MC
5060 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5061 return 0;
5062
c0c050c5
MC
5063 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
5064
5065 if (tpa_flags) {
5066 u16 mss = bp->dev->mtu - 40;
5067 u32 nsegs, n, segs = 0, flags;
5068
5069 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5070 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5071 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5072 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5073 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5074 if (tpa_flags & BNXT_FLAG_GRO)
5075 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5076
5077 req.flags = cpu_to_le32(flags);
5078
5079 req.enables =
5080 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
c193554e
MC
5081 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5082 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
c0c050c5
MC
5083
 5084 /* Number of segs is in log2 units, and the first packet is not
 5085 * included as part of these units.
5086 */
2839f28b
MC
5087 if (mss <= BNXT_RX_PAGE_SIZE) {
5088 n = BNXT_RX_PAGE_SIZE / mss;
c0c050c5
MC
5089 nsegs = (MAX_SKB_FRAGS - 1) * n;
5090 } else {
2839f28b
MC
5091 n = mss / BNXT_RX_PAGE_SIZE;
5092 if (mss & (BNXT_RX_PAGE_SIZE - 1))
c0c050c5
MC
5093 n++;
5094 nsegs = (MAX_SKB_FRAGS - n) / n;
5095 }
5096
79632e9b
MC
5097 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5098 segs = MAX_TPA_SEGS_P5;
5099 max_aggs = bp->max_tpa;
5100 } else {
5101 segs = ilog2(nsegs);
5102 }
c0c050c5 5103 req.max_agg_segs = cpu_to_le16(segs);
79632e9b 5104 req.max_aggs = cpu_to_le16(max_aggs);
c193554e
MC
5105
5106 req.min_agg_len = cpu_to_le32(512);
c0c050c5
MC
5107 }
5108 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5109
5110 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5111}
5112
2c61d211
MC
5113static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5114{
5115 struct bnxt_ring_grp_info *grp_info;
5116
5117 grp_info = &bp->grp_info[ring->grp_idx];
5118 return grp_info->cp_fw_ring_id;
5119}
5120
5121static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5122{
5123 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5124 struct bnxt_napi *bnapi = rxr->bnapi;
5125 struct bnxt_cp_ring_info *cpr;
5126
5127 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5128 return cpr->cp_ring_struct.fw_ring_id;
5129 } else {
5130 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5131 }
5132}
5133
5134static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5135{
5136 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5137 struct bnxt_napi *bnapi = txr->bnapi;
5138 struct bnxt_cp_ring_info *cpr;
5139
5140 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5141 return cpr->cp_ring_struct.fw_ring_id;
5142 } else {
5143 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5144 }
5145}
5146
1667cbf6
MC
5147static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5148{
5149 int entries;
5150
5151 if (bp->flags & BNXT_FLAG_CHIP_P5)
5152 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5153 else
5154 entries = HW_HASH_INDEX_SIZE;
5155
5156 bp->rss_indir_tbl_entries = entries;
5157 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5158 GFP_KERNEL);
5159 if (!bp->rss_indir_tbl)
5160 return -ENOMEM;
5161 return 0;
5162}
5163
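/* Fill the RSS indirection table with the default distribution: entries are
 * spread round-robin over the RX rings via ethtool_rxfh_indir_default(), and
 * any padding entries beyond the ethtool-visible size are zeroed.
 */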
5164static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5165{
5166 u16 max_rings, max_entries, pad, i;
5167
5168 if (!bp->rx_nr_rings)
5169 return;
5170
5171 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5172 max_rings = bp->rx_nr_rings - 1;
5173 else
5174 max_rings = bp->rx_nr_rings;
5175
5176 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5177
5178 for (i = 0; i < max_entries; i++)
5179 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5180
5181 pad = bp->rss_indir_tbl_entries - max_entries;
5182 if (pad)
5183 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5184}
5185
bd3191b5
MC
5186static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5187{
5188 u16 i, tbl_size, max_ring = 0;
5189
5190 if (!bp->rss_indir_tbl)
5191 return 0;
5192
5193 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5194 for (i = 0; i < tbl_size; i++)
5195 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5196 return max_ring;
5197}
5198
f9f6a3fb
MC
5199int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5200{
5201 if (bp->flags & BNXT_FLAG_CHIP_P5)
5202 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5203 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5204 return 2;
5205 return 1;
5206}
5207
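/* Translate the logical RSS indirection table into the VNIC's DMA-mapped
 * table.  The legacy format below stores one ring group id per entry; the
 * P5 format stores an (RX ring id, completion ring id) pair per entry.
 */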
f33a305d
MC
5208static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5209{
5210 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5211 u16 i, j;
5212
5213 /* Fill the RSS indirection table with ring group ids */
5214 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5215 if (!no_rss)
5216 j = bp->rss_indir_tbl[i];
5217 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5218 }
5219}
5220
5221static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5222 struct bnxt_vnic_info *vnic)
5223{
5224 __le16 *ring_tbl = vnic->rss_table;
5225 struct bnxt_rx_ring_info *rxr;
5226 u16 tbl_size, i;
5227
5228 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5229
5230 for (i = 0; i < tbl_size; i++) {
5231 u16 ring_id, j;
5232
5233 j = bp->rss_indir_tbl[i];
5234 rxr = &bp->rx_ring[j];
5235
5236 ring_id = rxr->rx_ring_struct.fw_ring_id;
5237 *ring_tbl++ = cpu_to_le16(ring_id);
5238 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5239 *ring_tbl++ = cpu_to_le16(ring_id);
5240 }
5241}
5242
5243static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5244{
5245 if (bp->flags & BNXT_FLAG_CHIP_P5)
5246 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5247 else
5248 __bnxt_fill_hw_rss_tbl(bp, vnic);
5249}
5250
c0c050c5
MC
5251static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5252{
c0c050c5
MC
5253 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5254 struct hwrm_vnic_rss_cfg_input req = {0};
5255
7b3af4f7
MC
5256 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5257 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
c0c050c5
MC
5258 return 0;
5259
5260 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5261 if (set_rss) {
f33a305d 5262 bnxt_fill_hw_rss_tbl(bp, vnic);
87da7f79 5263 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
50f011b6 5264 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
c0c050c5
MC
5265 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5266 req.hash_key_tbl_addr =
5267 cpu_to_le64(vnic->rss_hash_key_dma_addr);
5268 }
94ce9caa 5269 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
c0c050c5
MC
5270 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5271}
5272
7b3af4f7
MC
5273static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5274{
5275 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
7b3af4f7 5276 struct hwrm_vnic_rss_cfg_input req = {0};
f33a305d
MC
5277 dma_addr_t ring_tbl_map;
5278 u32 i, nr_ctxs;
7b3af4f7
MC
5279
5280 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5281 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5282 if (!set_rss) {
5283 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5284 return 0;
5285 }
f33a305d 5286 bnxt_fill_hw_rss_tbl(bp, vnic);
7b3af4f7
MC
5287 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5288 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
7b3af4f7 5289 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
f33a305d 5290 ring_tbl_map = vnic->rss_table_dma_addr;
f9f6a3fb 5291 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
f33a305d 5292 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
7b3af4f7
MC
5293 int rc;
5294
f33a305d 5295 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
7b3af4f7
MC
5296 req.ring_table_pair_index = i;
5297 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
7b3af4f7
MC
5298 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5299 if (rc)
d4f1420d 5300 return rc;
7b3af4f7
MC
5301 }
5302 return 0;
5303}
5304
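bnxt_hwrm_vnic_set_rss_p5() issues one VNIC_RSS_CFG request per RSS context, advancing the ring-table DMA address by BNXT_RSS_TABLE_SIZE_P5 each iteration so every context points at its own slice of the table. A sketch of that address walk; the 256-byte slice size and the starting address are assumptions for illustration only:

/* Walk a DMA-contiguous ring table one fixed-size slice per RSS context,
 * as the P5 set_rss path does.  Slice size and base address are assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define RSS_TABLE_SIZE_P5 256

int main(void)
{
    uint64_t ring_tbl_map = 0x100000;   /* pretend DMA address */
    int nr_ctxs = 3, i;

    for (i = 0; i < nr_ctxs; i++, ring_tbl_map += RSS_TABLE_SIZE_P5)
        printf("ctx %d: ring table slice at 0x%llx\n", i,
               (unsigned long long)ring_tbl_map);
    return 0;
}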
c0c050c5
MC
5305static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5306{
5307 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5308 struct hwrm_vnic_plcmodes_cfg_input req = {0};
5309
5310 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5311 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5312 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5313 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5314 req.enables =
5315 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5316 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5317 /* thresholds not implemented in firmware yet */
5318 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5319 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5320 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5321 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5322}
5323
94ce9caa
PS
5324static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5325 u16 ctx_idx)
c0c050c5
MC
5326{
5327 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5328
5329 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5330 req.rss_cos_lb_ctx_id =
94ce9caa 5331 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
c0c050c5
MC
5332
5333 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
94ce9caa 5334 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
c0c050c5
MC
5335}
5336
5337static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5338{
94ce9caa 5339 int i, j;
c0c050c5
MC
5340
5341 for (i = 0; i < bp->nr_vnics; i++) {
5342 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5343
94ce9caa
PS
5344 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5345 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5346 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5347 }
c0c050c5
MC
5348 }
5349 bp->rsscos_nr_ctxs = 0;
5350}
5351
94ce9caa 5352static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
c0c050c5
MC
5353{
5354 int rc;
5355 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5356 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5357 bp->hwrm_cmd_resp_addr;
5358
5359 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5360 -1);
5361
5362 mutex_lock(&bp->hwrm_cmd_lock);
5363 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5364 if (!rc)
94ce9caa 5365 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
c0c050c5
MC
5366 le16_to_cpu(resp->rss_cos_lb_ctx_id);
5367 mutex_unlock(&bp->hwrm_cmd_lock);
5368
5369 return rc;
5370}
5371
abe93ad2
MC
5372static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5373{
5374 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5375 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5376 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5377}
5378
a588e458 5379int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
c0c050c5 5380{
b81a90d3 5381 unsigned int ring = 0, grp_idx;
c0c050c5
MC
5382 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5383 struct hwrm_vnic_cfg_input req = {0};
cf6645f8 5384 u16 def_vlan = 0;
c0c050c5
MC
5385
5386 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
dc52c6c7 5387
7b3af4f7
MC
5388 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5389 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5390
5391 req.default_rx_ring_id =
5392 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5393 req.default_cmpl_ring_id =
5394 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5395 req.enables =
5396 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5397 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5398 goto vnic_mru;
5399 }
dc52c6c7 5400 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
c0c050c5 5401 /* Only RSS support for now TBD: COS & LB */
dc52c6c7
PS
5402 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5403 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5404 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5405 VNIC_CFG_REQ_ENABLES_MRU);
ae10ae74
MC
5406 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5407 req.rss_rule =
5408 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5409 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5410 VNIC_CFG_REQ_ENABLES_MRU);
5411 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
dc52c6c7
PS
5412 } else {
5413 req.rss_rule = cpu_to_le16(0xffff);
5414 }
94ce9caa 5415
dc52c6c7
PS
5416 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5417 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
94ce9caa
PS
5418 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5419 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5420 } else {
5421 req.cos_rule = cpu_to_le16(0xffff);
5422 }
5423
c0c050c5 5424 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
b81a90d3 5425 ring = 0;
c0c050c5 5426 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
b81a90d3 5427 ring = vnic_id - 1;
76595193
PS
5428 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5429 ring = bp->rx_nr_rings - 1;
c0c050c5 5430
b81a90d3 5431 grp_idx = bp->rx_ring[ring].bnapi->index;
c0c050c5 5432 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
c0c050c5 5433 req.lb_rule = cpu_to_le16(0xffff);
7b3af4f7 5434vnic_mru:
d0b82c54 5435 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
c0c050c5 5436
7b3af4f7 5437 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
cf6645f8
MC
5438#ifdef CONFIG_BNXT_SRIOV
5439 if (BNXT_VF(bp))
5440 def_vlan = bp->vf.vlan;
5441#endif
5442 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
c0c050c5 5443 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
a588e458 5444 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
abe93ad2 5445 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
c0c050c5
MC
5446
5447 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5448}
5449
3d061591 5450static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
c0c050c5 5451{
c0c050c5
MC
5452 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5453 struct hwrm_vnic_free_input req = {0};
5454
5455 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5456 req.vnic_id =
5457 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5458
3d061591 5459 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
c0c050c5
MC
5460 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5461 }
c0c050c5
MC
5462}
5463
5464static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5465{
5466 u16 i;
5467
5468 for (i = 0; i < bp->nr_vnics; i++)
5469 bnxt_hwrm_vnic_free_one(bp, i);
5470}
5471
b81a90d3
MC
5472static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5473 unsigned int start_rx_ring_idx,
5474 unsigned int nr_rings)
c0c050c5 5475{
b81a90d3
MC
5476 int rc = 0;
5477 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
c0c050c5
MC
5478 struct hwrm_vnic_alloc_input req = {0};
5479 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
44c6f72a
MC
5480 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5481
5482 if (bp->flags & BNXT_FLAG_CHIP_P5)
5483 goto vnic_no_ring_grps;
c0c050c5
MC
5484
5485 /* map ring groups to this vnic */
b81a90d3
MC
5486 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5487 grp_idx = bp->rx_ring[i].bnapi->index;
5488 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
c0c050c5 5489 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
b81a90d3 5490 j, nr_rings);
c0c050c5
MC
5491 break;
5492 }
44c6f72a 5493 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
c0c050c5
MC
5494 }
5495
44c6f72a
MC
5496vnic_no_ring_grps:
5497 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5498 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
c0c050c5
MC
5499 if (vnic_id == 0)
5500 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5501
5502 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5503
5504 mutex_lock(&bp->hwrm_cmd_lock);
5505 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5506 if (!rc)
44c6f72a 5507 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
c0c050c5
MC
5508 mutex_unlock(&bp->hwrm_cmd_lock);
5509 return rc;
5510}
5511
8fdefd63
MC
5512static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5513{
5514 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5515 struct hwrm_vnic_qcaps_input req = {0};
5516 int rc;
5517
fbbdbc64 5518 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
ba642ab7 5519 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
8fdefd63
MC
5520 if (bp->hwrm_spec_code < 0x10600)
5521 return 0;
5522
5523 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5524 mutex_lock(&bp->hwrm_cmd_lock);
5525 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5526 if (!rc) {
abe93ad2
MC
5527 u32 flags = le32_to_cpu(resp->flags);
5528
41e8d798
MC
5529 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5530 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
8fdefd63 5531 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
abe93ad2
MC
5532 if (flags &
5533 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5534 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
1da63ddd
EP
5535
5536 /* Older P5 fw before EXT_HW_STATS support did not set
5537 * VLAN_STRIP_CAP properly.
5538 */
5539 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
9d6b648c 5540 (BNXT_CHIP_P5_THOR(bp) &&
1da63ddd
EP
5541 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5542 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
79632e9b 5543 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
9d6b648c
MC
5544 if (bp->max_tpa_v2) {
5545 if (BNXT_CHIP_P5_THOR(bp))
5546 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5547 else
5548 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5549 }
8fdefd63
MC
5550 }
5551 mutex_unlock(&bp->hwrm_cmd_lock);
5552 return rc;
5553}
5554
c0c050c5
MC
5555static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5556{
5557 u16 i;
5558 u32 rc = 0;
5559
44c6f72a
MC
5560 if (bp->flags & BNXT_FLAG_CHIP_P5)
5561 return 0;
5562
c0c050c5
MC
5563 mutex_lock(&bp->hwrm_cmd_lock);
5564 for (i = 0; i < bp->rx_nr_rings; i++) {
5565 struct hwrm_ring_grp_alloc_input req = {0};
5566 struct hwrm_ring_grp_alloc_output *resp =
5567 bp->hwrm_cmd_resp_addr;
b81a90d3 5568 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
c0c050c5
MC
5569
5570 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5571
b81a90d3
MC
5572 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5573 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5574 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5575 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
c0c050c5
MC
5576
5577 rc = _hwrm_send_message(bp, &req, sizeof(req),
5578 HWRM_CMD_TIMEOUT);
5579 if (rc)
5580 break;
5581
b81a90d3
MC
5582 bp->grp_info[grp_idx].fw_grp_id =
5583 le32_to_cpu(resp->ring_group_id);
c0c050c5
MC
5584 }
5585 mutex_unlock(&bp->hwrm_cmd_lock);
5586 return rc;
5587}
5588
3d061591 5589static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
c0c050c5
MC
5590{
5591 u16 i;
c0c050c5
MC
5592 struct hwrm_ring_grp_free_input req = {0};
5593
44c6f72a 5594 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
3d061591 5595 return;
c0c050c5
MC
5596
5597 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5598
5599 mutex_lock(&bp->hwrm_cmd_lock);
5600 for (i = 0; i < bp->cp_nr_rings; i++) {
5601 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5602 continue;
5603 req.ring_group_id =
5604 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5605
3d061591 5606 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
c0c050c5
MC
5607 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5608 }
5609 mutex_unlock(&bp->hwrm_cmd_lock);
c0c050c5
MC
5610}
5611
5612static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5613 struct bnxt_ring_struct *ring,
9899bb59 5614 u32 ring_type, u32 map_index)
c0c050c5
MC
5615{
5616 int rc = 0, err = 0;
5617 struct hwrm_ring_alloc_input req = {0};
5618 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6fe19886 5619 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
9899bb59 5620 struct bnxt_ring_grp_info *grp_info;
c0c050c5
MC
5621 u16 ring_id;
5622
5623 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5624
5625 req.enables = 0;
6fe19886
MC
5626 if (rmem->nr_pages > 1) {
5627 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
c0c050c5
MC
5628 /* Page size is in log2 units */
5629 req.page_size = BNXT_PAGE_SHIFT;
5630 req.page_tbl_depth = 1;
5631 } else {
6fe19886 5632 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
c0c050c5
MC
5633 }
5634 req.fbo = 0;
5635 /* Association of ring index with doorbell index and MSIX number */
5636 req.logical_id = cpu_to_le16(map_index);
5637
5638 switch (ring_type) {
2c61d211
MC
5639 case HWRM_RING_ALLOC_TX: {
5640 struct bnxt_tx_ring_info *txr;
5641
5642 txr = container_of(ring, struct bnxt_tx_ring_info,
5643 tx_ring_struct);
c0c050c5
MC
5644 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5645 /* Association of transmit ring with completion ring */
9899bb59 5646 grp_info = &bp->grp_info[ring->grp_idx];
2c61d211 5647 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
c0c050c5 5648 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
9899bb59 5649 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
c0c050c5
MC
5650 req.queue_id = cpu_to_le16(ring->queue_id);
5651 break;
2c61d211 5652 }
c0c050c5
MC
5653 case HWRM_RING_ALLOC_RX:
5654 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5655 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
23aefdd7
MC
5656 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5657 u16 flags = 0;
5658
5659 /* Association of rx ring with stats context */
5660 grp_info = &bp->grp_info[ring->grp_idx];
5661 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5662 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5663 req.enables |= cpu_to_le32(
5664 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5665 if (NET_IP_ALIGN == 2)
5666 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5667 req.flags = cpu_to_le16(flags);
5668 }
c0c050c5
MC
5669 break;
5670 case HWRM_RING_ALLOC_AGG:
23aefdd7
MC
5671 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5672 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5673 /* Association of agg ring with rx ring */
5674 grp_info = &bp->grp_info[ring->grp_idx];
5675 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5676 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5677 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5678 req.enables |= cpu_to_le32(
5679 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5680 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5681 } else {
5682 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5683 }
c0c050c5
MC
5684 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5685 break;
5686 case HWRM_RING_ALLOC_CMPL:
bac9a7e0 5687 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
c0c050c5 5688 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
23aefdd7
MC
5689 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5690 /* Association of cp ring with nq */
5691 grp_info = &bp->grp_info[map_index];
5692 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5693 req.cq_handle = cpu_to_le64(ring->handle);
5694 req.enables |= cpu_to_le32(
5695 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5696 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5697 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5698 }
5699 break;
5700 case HWRM_RING_ALLOC_NQ:
5701 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5702 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
c0c050c5
MC
5703 if (bp->flags & BNXT_FLAG_USING_MSIX)
5704 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5705 break;
5706 default:
5707 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5708 ring_type);
5709 return -1;
5710 }
5711
5712 mutex_lock(&bp->hwrm_cmd_lock);
5713 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5714 err = le16_to_cpu(resp->error_code);
5715 ring_id = le16_to_cpu(resp->ring_id);
5716 mutex_unlock(&bp->hwrm_cmd_lock);
5717
5718 if (rc || err) {
2727c888
MC
5719 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5720 ring_type, rc, err);
5721 return -EIO;
c0c050c5
MC
5722 }
5723 ring->fw_ring_id = ring_id;
5724 return rc;
5725}
5726
486b5c22
MC
5727static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5728{
5729 int rc;
5730
5731 if (BNXT_PF(bp)) {
5732 struct hwrm_func_cfg_input req = {0};
5733
5734 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5735 req.fid = cpu_to_le16(0xffff);
5736 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5737 req.async_event_cr = cpu_to_le16(idx);
5738 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5739 } else {
5740 struct hwrm_func_vf_cfg_input req = {0};
5741
5742 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5743 req.enables =
5744 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5745 req.async_event_cr = cpu_to_le16(idx);
5746 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5747 }
5748 return rc;
5749}
5750
697197e5
MC
5751static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5752 u32 map_idx, u32 xid)
5753{
5754 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5755 if (BNXT_PF(bp))
ebdf73dc 5756 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
697197e5 5757 else
ebdf73dc 5758 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
697197e5
MC
5759 switch (ring_type) {
5760 case HWRM_RING_ALLOC_TX:
5761 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5762 break;
5763 case HWRM_RING_ALLOC_RX:
5764 case HWRM_RING_ALLOC_AGG:
5765 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5766 break;
5767 case HWRM_RING_ALLOC_CMPL:
5768 db->db_key64 = DBR_PATH_L2;
5769 break;
5770 case HWRM_RING_ALLOC_NQ:
5771 db->db_key64 = DBR_PATH_L2;
5772 break;
5773 }
5774 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5775 } else {
5776 db->doorbell = bp->bar1 + map_idx * 0x80;
5777 switch (ring_type) {
5778 case HWRM_RING_ALLOC_TX:
5779 db->db_key32 = DB_KEY_TX;
5780 break;
5781 case HWRM_RING_ALLOC_RX:
5782 case HWRM_RING_ALLOC_AGG:
5783 db->db_key32 = DB_KEY_RX;
5784 break;
5785 case HWRM_RING_ALLOC_CMPL:
5786 db->db_key32 = DB_KEY_CP;
5787 break;
5788 }
5789 }
5790}
5791
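bnxt_set_db() shows the two doorbell schemes: legacy chips use a 32-bit key plus a 0x80-byte doorbell stride per MSI-X vector, while P5 chips pack path, type, and the firmware ring ID (xid) into a single 64-bit key. A small sketch of the 64-bit packing; the bit positions below are placeholders, not the DBR_* constants defined in bnxt.h:

/* Compose a P5-style 64-bit doorbell key: fixed path/type bits OR'd with
 * the ring ID shifted into the XID field.  Shift and flag values here are
 * illustrative assumptions, not the driver's real DBR_* definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_DB_PATH_L2   (0x1ULL << 56)  /* assumed bit positions */
#define EX_DB_TYPE_SQ   (0x0ULL << 60)
#define EX_DB_XID_SHIFT 32

int main(void)
{
    uint16_t fw_ring_id = 0x1234;
    uint64_t key = EX_DB_PATH_L2 | EX_DB_TYPE_SQ |
                   ((uint64_t)fw_ring_id << EX_DB_XID_SHIFT);

    printf("db_key64 = 0x%016llx\n", (unsigned long long)key);
    return 0;
}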
c0c050c5
MC
5792static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5793{
e8f267b0 5794 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
c0c050c5 5795 int i, rc = 0;
697197e5 5796 u32 type;
c0c050c5 5797
23aefdd7
MC
5798 if (bp->flags & BNXT_FLAG_CHIP_P5)
5799 type = HWRM_RING_ALLOC_NQ;
5800 else
5801 type = HWRM_RING_ALLOC_CMPL;
edd0c2cc
MC
5802 for (i = 0; i < bp->cp_nr_rings; i++) {
5803 struct bnxt_napi *bnapi = bp->bnapi[i];
5804 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5805 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9899bb59 5806 u32 map_idx = ring->map_idx;
5e66e35a 5807 unsigned int vector;
c0c050c5 5808
5e66e35a
MC
5809 vector = bp->irq_tbl[map_idx].vector;
5810 disable_irq_nosync(vector);
697197e5 5811 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5e66e35a
MC
5812 if (rc) {
5813 enable_irq(vector);
edd0c2cc 5814 goto err_out;
5e66e35a 5815 }
697197e5
MC
5816 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5817 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5e66e35a 5818 enable_irq(vector);
edd0c2cc 5819 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
486b5c22
MC
5820
5821 if (!i) {
5822 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5823 if (rc)
5824 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5825 }
c0c050c5
MC
5826 }
5827
697197e5 5828 type = HWRM_RING_ALLOC_TX;
edd0c2cc 5829 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5830 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3e08b184
MC
5831 struct bnxt_ring_struct *ring;
5832 u32 map_idx;
c0c050c5 5833
3e08b184
MC
5834 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5835 struct bnxt_napi *bnapi = txr->bnapi;
5836 struct bnxt_cp_ring_info *cpr, *cpr2;
5837 u32 type2 = HWRM_RING_ALLOC_CMPL;
5838
5839 cpr = &bnapi->cp_ring;
5840 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5841 ring = &cpr2->cp_ring_struct;
5842 ring->handle = BNXT_TX_HDL;
5843 map_idx = bnapi->index;
5844 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5845 if (rc)
5846 goto err_out;
5847 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5848 ring->fw_ring_id);
5849 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5850 }
5851 ring = &txr->tx_ring_struct;
5852 map_idx = i;
697197e5 5853 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5854 if (rc)
5855 goto err_out;
697197e5 5856 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
c0c050c5
MC
5857 }
5858
697197e5 5859 type = HWRM_RING_ALLOC_RX;
edd0c2cc 5860 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5861 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5862 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3e08b184
MC
5863 struct bnxt_napi *bnapi = rxr->bnapi;
5864 u32 map_idx = bnapi->index;
c0c050c5 5865
697197e5 5866 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5867 if (rc)
5868 goto err_out;
697197e5 5869 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
e8f267b0
MC
5870 /* If we have agg rings, post agg buffers first. */
5871 if (!agg_rings)
5872 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5873 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3e08b184
MC
5874 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5875 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5876 u32 type2 = HWRM_RING_ALLOC_CMPL;
5877 struct bnxt_cp_ring_info *cpr2;
5878
5879 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5880 ring = &cpr2->cp_ring_struct;
5881 ring->handle = BNXT_RX_HDL;
5882 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5883 if (rc)
5884 goto err_out;
5885 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5886 ring->fw_ring_id);
5887 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5888 }
c0c050c5
MC
5889 }
5890
e8f267b0 5891 if (agg_rings) {
697197e5 5892 type = HWRM_RING_ALLOC_AGG;
c0c050c5 5893 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5894 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
5895 struct bnxt_ring_struct *ring =
5896 &rxr->rx_agg_ring_struct;
9899bb59 5897 u32 grp_idx = ring->grp_idx;
b81a90d3 5898 u32 map_idx = grp_idx + bp->rx_nr_rings;
c0c050c5 5899
697197e5 5900 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
c0c050c5
MC
5901 if (rc)
5902 goto err_out;
5903
697197e5
MC
5904 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5905 ring->fw_ring_id);
5906 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 5907 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5908 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
5909 }
5910 }
5911err_out:
5912 return rc;
5913}
5914
5915static int hwrm_ring_free_send_msg(struct bnxt *bp,
5916 struct bnxt_ring_struct *ring,
5917 u32 ring_type, int cmpl_ring_id)
5918{
5919 int rc;
5920 struct hwrm_ring_free_input req = {0};
5921 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5922 u16 error_code;
5923
b340dc68 5924 if (BNXT_NO_FW_ACCESS(bp))
b4fff207
MC
5925 return 0;
5926
74608fc9 5927 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
c0c050c5
MC
5928 req.ring_type = ring_type;
5929 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5930
5931 mutex_lock(&bp->hwrm_cmd_lock);
5932 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5933 error_code = le16_to_cpu(resp->error_code);
5934 mutex_unlock(&bp->hwrm_cmd_lock);
5935
5936 if (rc || error_code) {
2727c888
MC
5937 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5938 ring_type, rc, error_code);
5939 return -EIO;
c0c050c5
MC
5940 }
5941 return 0;
5942}
5943
edd0c2cc 5944static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 5945{
23aefdd7 5946 u32 type;
edd0c2cc 5947 int i;
c0c050c5
MC
5948
5949 if (!bp->bnapi)
edd0c2cc 5950 return;
c0c050c5 5951
edd0c2cc 5952 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5953 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 5954 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
edd0c2cc
MC
5955
5956 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5957 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5958
edd0c2cc
MC
5959 hwrm_ring_free_send_msg(bp, ring,
5960 RING_FREE_REQ_RING_TYPE_TX,
5961 close_path ? cmpl_ring_id :
5962 INVALID_HW_RING_ID);
5963 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5964 }
5965 }
5966
edd0c2cc 5967 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5968 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5969 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3 5970 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5971
5972 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5973 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5974
edd0c2cc
MC
5975 hwrm_ring_free_send_msg(bp, ring,
5976 RING_FREE_REQ_RING_TYPE_RX,
5977 close_path ? cmpl_ring_id :
5978 INVALID_HW_RING_ID);
5979 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5980 bp->grp_info[grp_idx].rx_fw_ring_id =
5981 INVALID_HW_RING_ID;
c0c050c5
MC
5982 }
5983 }
5984
23aefdd7
MC
5985 if (bp->flags & BNXT_FLAG_CHIP_P5)
5986 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5987 else
5988 type = RING_FREE_REQ_RING_TYPE_RX;
edd0c2cc 5989 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5990 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5991 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
b81a90d3 5992 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5993
5994 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5995 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5996
23aefdd7 5997 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5998 close_path ? cmpl_ring_id :
5999 INVALID_HW_RING_ID);
6000 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
6001 bp->grp_info[grp_idx].agg_fw_ring_id =
6002 INVALID_HW_RING_ID;
c0c050c5
MC
6003 }
6004 }
6005
9d8bc097
MC
6006 /* The completion rings are about to be freed. After that the
6007 * IRQ doorbell will not work anymore. So we need to disable
6008 * IRQ here.
6009 */
6010 bnxt_disable_int_sync(bp);
6011
23aefdd7
MC
6012 if (bp->flags & BNXT_FLAG_CHIP_P5)
6013 type = RING_FREE_REQ_RING_TYPE_NQ;
6014 else
6015 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
edd0c2cc
MC
6016 for (i = 0; i < bp->cp_nr_rings; i++) {
6017 struct bnxt_napi *bnapi = bp->bnapi[i];
6018 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3e08b184
MC
6019 struct bnxt_ring_struct *ring;
6020 int j;
edd0c2cc 6021
3e08b184
MC
6022 for (j = 0; j < 2; j++) {
6023 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
6024
6025 if (cpr2) {
6026 ring = &cpr2->cp_ring_struct;
6027 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6028 continue;
6029 hwrm_ring_free_send_msg(bp, ring,
6030 RING_FREE_REQ_RING_TYPE_L2_CMPL,
6031 INVALID_HW_RING_ID);
6032 ring->fw_ring_id = INVALID_HW_RING_ID;
6033 }
6034 }
6035 ring = &cpr->cp_ring_struct;
edd0c2cc 6036 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
23aefdd7 6037 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
6038 INVALID_HW_RING_ID);
6039 ring->fw_ring_id = INVALID_HW_RING_ID;
6040 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
6041 }
6042 }
c0c050c5
MC
6043}
6044
41e8d798
MC
6045static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6046 bool shared);
6047
674f50a5
MC
6048static int bnxt_hwrm_get_rings(struct bnxt *bp)
6049{
6050 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6051 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6052 struct hwrm_func_qcfg_input req = {0};
6053 int rc;
6054
6055 if (bp->hwrm_spec_code < 0x10601)
6056 return 0;
6057
6058 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6059 req.fid = cpu_to_le16(0xffff);
6060 mutex_lock(&bp->hwrm_cmd_lock);
6061 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6062 if (rc) {
6063 mutex_unlock(&bp->hwrm_cmd_lock);
d4f1420d 6064 return rc;
674f50a5
MC
6065 }
6066
6067 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
f1ca94de 6068 if (BNXT_NEW_RM(bp)) {
674f50a5
MC
6069 u16 cp, stats;
6070
6071 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6072 hw_resc->resv_hw_ring_grps =
6073 le32_to_cpu(resp->alloc_hw_ring_grps);
6074 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6075 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6076 stats = le16_to_cpu(resp->alloc_stat_ctx);
75720e63 6077 hw_resc->resv_irqs = cp;
41e8d798
MC
6078 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6079 int rx = hw_resc->resv_rx_rings;
6080 int tx = hw_resc->resv_tx_rings;
6081
6082 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6083 rx >>= 1;
6084 if (cp < (rx + tx)) {
6085 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6086 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6087 rx <<= 1;
6088 hw_resc->resv_rx_rings = rx;
6089 hw_resc->resv_tx_rings = tx;
6090 }
75720e63 6091 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
41e8d798
MC
6092 hw_resc->resv_hw_ring_grps = rx;
6093 }
674f50a5 6094 hw_resc->resv_cp_rings = cp;
780baad4 6095 hw_resc->resv_stat_ctxs = stats;
674f50a5
MC
6096 }
6097 mutex_unlock(&bp->hwrm_cmd_lock);
6098 return 0;
6099}
6100
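When aggregation rings are enabled, each logical RX ring consumes two firmware RX rings, so bnxt_hwrm_get_rings() halves the reserved RX count before trimming it against the completion-ring budget and doubles it back afterwards. A self-contained sketch of that bookkeeping; the trim loop here is a simplified stand-in for bnxt_trim_rings(), which also rebalances TX:

/* Halve RX before trimming to the completion budget, then double it back,
 * mirroring the agg-ring accounting above.  The trim loop is a simplified
 * stand-in for the driver's bnxt_trim_rings().
 */
#include <stdio.h>

int main(void)
{
    int resv_rx = 12, resv_tx = 8, cp = 10;
    int agg_rings = 1;
    int rx = resv_rx, tx = resv_tx;

    if (agg_rings)
        rx >>= 1;                       /* two FW rings per logical RX ring */
    while (rx + tx > cp && rx > 1)      /* shrink RX first, keep at least 1 */
        rx--;
    while (rx + tx > cp && tx > 1)
        tx--;
    if (agg_rings)
        rx <<= 1;

    printf("rx=%d tx=%d (cp budget %d)\n", rx, tx, cp);
    return 0;
}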
391be5c2
MC
6101/* Caller must hold bp->hwrm_cmd_lock */
6102int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6103{
6104 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6105 struct hwrm_func_qcfg_input req = {0};
6106 int rc;
6107
6108 if (bp->hwrm_spec_code < 0x10601)
6109 return 0;
6110
6111 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6112 req.fid = cpu_to_le16(fid);
6113 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6114 if (!rc)
6115 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6116
6117 return rc;
6118}
6119
41e8d798
MC
6120static bool bnxt_rfs_supported(struct bnxt *bp);
6121
4ed50ef4
MC
6122static void
6123__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
6124 int tx_rings, int rx_rings, int ring_grps,
780baad4 6125 int cp_rings, int stats, int vnics)
391be5c2 6126{
674f50a5 6127 u32 enables = 0;
391be5c2 6128
4ed50ef4
MC
6129 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
6130 req->fid = cpu_to_le16(0xffff);
674f50a5 6131 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4ed50ef4 6132 req->num_tx_rings = cpu_to_le16(tx_rings);
f1ca94de 6133 if (BNXT_NEW_RM(bp)) {
674f50a5 6134 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
3f93cd3f 6135 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
6136 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6137 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6138 enables |= tx_rings + ring_grps ?
3f93cd3f 6139 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6140 enables |= rx_rings ?
6141 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6142 } else {
6143 enables |= cp_rings ?
3f93cd3f 6144 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6145 enables |= ring_grps ?
6146 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6147 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6148 }
dbe80d44 6149 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
674f50a5 6150
4ed50ef4 6151 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
6152 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6153 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6154 req->num_msix = cpu_to_le16(cp_rings);
6155 req->num_rsscos_ctxs =
6156 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6157 } else {
6158 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6159 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6160 req->num_rsscos_ctxs = cpu_to_le16(1);
6161 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6162 bnxt_rfs_supported(bp))
6163 req->num_rsscos_ctxs =
6164 cpu_to_le16(ring_grps + 1);
6165 }
780baad4 6166 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4 6167 req->num_vnics = cpu_to_le16(vnics);
674f50a5 6168 }
4ed50ef4
MC
6169 req->enables = cpu_to_le32(enables);
6170}
6171
6172static void
6173__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
6174 struct hwrm_func_vf_cfg_input *req, int tx_rings,
6175 int rx_rings, int ring_grps, int cp_rings,
780baad4 6176 int stats, int vnics)
4ed50ef4
MC
6177{
6178 u32 enables = 0;
6179
6180 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
6181 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
41e8d798
MC
6182 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6183 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
3f93cd3f 6184 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
6185 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6186 enables |= tx_rings + ring_grps ?
3f93cd3f 6187 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6188 } else {
6189 enables |= cp_rings ?
3f93cd3f 6190 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6191 enables |= ring_grps ?
6192 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6193 }
4ed50ef4 6194 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
41e8d798 6195 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
4ed50ef4 6196
41e8d798 6197 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
4ed50ef4
MC
6198 req->num_tx_rings = cpu_to_le16(tx_rings);
6199 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
6200 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6201 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6202 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6203 } else {
6204 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6205 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6206 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6207 }
780baad4 6208 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4
MC
6209 req->num_vnics = cpu_to_le16(vnics);
6210
6211 req->enables = cpu_to_le32(enables);
6212}
6213
6214static int
6215bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 6216 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4
MC
6217{
6218 struct hwrm_func_cfg_input req = {0};
6219 int rc;
6220
6221 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 6222 cp_rings, stats, vnics);
4ed50ef4 6223 if (!req.enables)
391be5c2
MC
6224 return 0;
6225
674f50a5
MC
6226 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6227 if (rc)
d4f1420d 6228 return rc;
674f50a5
MC
6229
6230 if (bp->hwrm_spec_code < 0x10601)
6231 bp->hw_resc.resv_tx_rings = tx_rings;
6232
9f90445c 6233 return bnxt_hwrm_get_rings(bp);
674f50a5
MC
6234}
6235
6236static int
6237bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 6238 int ring_grps, int cp_rings, int stats, int vnics)
674f50a5
MC
6239{
6240 struct hwrm_func_vf_cfg_input req = {0};
674f50a5
MC
6241 int rc;
6242
f1ca94de 6243 if (!BNXT_NEW_RM(bp)) {
674f50a5 6244 bp->hw_resc.resv_tx_rings = tx_rings;
391be5c2 6245 return 0;
674f50a5 6246 }
391be5c2 6247
4ed50ef4 6248 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 6249 cp_rings, stats, vnics);
391be5c2 6250 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
674f50a5 6251 if (rc)
d4f1420d 6252 return rc;
674f50a5 6253
9f90445c 6254 return bnxt_hwrm_get_rings(bp);
674f50a5
MC
6255}
6256
6257static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
780baad4 6258 int cp, int stat, int vnic)
674f50a5
MC
6259{
6260 if (BNXT_PF(bp))
780baad4
VV
6261 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6262 vnic);
674f50a5 6263 else
780baad4
VV
6264 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6265 vnic);
674f50a5
MC
6266}
6267
b16b6891 6268int bnxt_nq_rings_in_use(struct bnxt *bp)
08654eb2
MC
6269{
6270 int cp = bp->cp_nr_rings;
6271 int ulp_msix, ulp_base;
6272
6273 ulp_msix = bnxt_get_ulp_msix_num(bp);
6274 if (ulp_msix) {
6275 ulp_base = bnxt_get_ulp_msix_base(bp);
6276 cp += ulp_msix;
6277 if ((ulp_base + ulp_msix) > cp)
6278 cp = ulp_base + ulp_msix;
6279 }
6280 return cp;
6281}
6282
c0b8cda0
MC
6283static int bnxt_cp_rings_in_use(struct bnxt *bp)
6284{
6285 int cp;
6286
6287 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6288 return bnxt_nq_rings_in_use(bp);
6289
6290 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6291 return cp;
6292}
6293
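On legacy chips one completion ring serves each MSI-X vector, so bnxt_cp_rings_in_use() simply reuses the NQ count. On P5 every TX and every RX ring has its own completion queue, while the NQs follow bp->cp_nr_rings plus any ULP vectors. A tiny sketch contrasting the two counts, assuming shared TX/RX NAPI and no ULP MSI-X:

/* Contrast NQ count with completion-ring count on a P5-style chip,
 * assuming shared TX/RX rings (one NQ per pair) and no ULP MSI-X.
 */
#include <stdio.h>

int main(void)
{
    int tx = 8, rx = 8;
    int nqs = tx > rx ? tx : rx;        /* shared rings: one NQ per TX/RX pair */
    int cp_rings = tx + rx;             /* P5: one CQ per TX and per RX ring */

    printf("nqs=%d cp_rings=%d\n", nqs, cp_rings);
    return 0;
}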
780baad4
VV
6294static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6295{
d77b1ad8
MC
6296 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6297 int cp = bp->cp_nr_rings;
6298
6299 if (!ulp_stat)
6300 return cp;
6301
6302 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6303 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6304
6305 return cp + ulp_stat;
780baad4
VV
6306}
6307
b43b9f53
MC
6308/* Check if a default RSS map needs to be set up. This function is only
6309 * used on older firmware that does not require reserving RX rings.
6310 */
6311static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6312{
6313 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6314
6315 /* The RSS map is valid for RX rings set to resv_rx_rings */
6316 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6317 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6318 if (!netif_is_rxfh_configured(bp->dev))
6319 bnxt_set_dflt_rss_indir_tbl(bp);
6320 }
6321}
6322
4e41dc5d
MC
6323static bool bnxt_need_reserve_rings(struct bnxt *bp)
6324{
6325 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
fbcfc8e4 6326 int cp = bnxt_cp_rings_in_use(bp);
c0b8cda0 6327 int nq = bnxt_nq_rings_in_use(bp);
780baad4 6328 int rx = bp->rx_nr_rings, stat;
4e41dc5d
MC
6329 int vnic = 1, grp = rx;
6330
b43b9f53
MC
6331 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6332 bp->hwrm_spec_code >= 0x10601)
4e41dc5d
MC
6333 return true;
6334
b43b9f53
MC
6335 /* Old firmware does not need RX ring reservations but we still
6336 * need to set up a default RSS map when needed. With new firmware
6337 * we go through RX ring reservations first and then set up the
6338 * RSS map for the successfully reserved RX rings when needed.
6339 */
6340 if (!BNXT_NEW_RM(bp)) {
6341 bnxt_check_rss_tbl_no_rmgr(bp);
6342 return false;
6343 }
41e8d798 6344 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
4e41dc5d
MC
6345 vnic = rx + 1;
6346 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6347 rx <<= 1;
780baad4 6348 stat = bnxt_get_func_stat_ctxs(bp);
b43b9f53
MC
6349 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6350 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6351 (hw_resc->resv_hw_ring_grps != grp &&
6352 !(bp->flags & BNXT_FLAG_CHIP_P5)))
4e41dc5d 6353 return true;
01989c6b
MC
6354 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6355 hw_resc->resv_irqs != nq)
6356 return true;
4e41dc5d
MC
6357 return false;
6358}
6359
674f50a5
MC
6360static int __bnxt_reserve_rings(struct bnxt *bp)
6361{
6362 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
c0b8cda0 6363 int cp = bnxt_nq_rings_in_use(bp);
674f50a5
MC
6364 int tx = bp->tx_nr_rings;
6365 int rx = bp->rx_nr_rings;
674f50a5 6366 int grp, rx_rings, rc;
780baad4 6367 int vnic = 1, stat;
674f50a5 6368 bool sh = false;
674f50a5 6369
4e41dc5d 6370 if (!bnxt_need_reserve_rings(bp))
674f50a5
MC
6371 return 0;
6372
6373 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6374 sh = true;
41e8d798 6375 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
674f50a5
MC
6376 vnic = rx + 1;
6377 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6378 rx <<= 1;
674f50a5 6379 grp = bp->rx_nr_rings;
780baad4 6380 stat = bnxt_get_func_stat_ctxs(bp);
674f50a5 6381
780baad4 6382 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
391be5c2
MC
6383 if (rc)
6384 return rc;
6385
674f50a5 6386 tx = hw_resc->resv_tx_rings;
f1ca94de 6387 if (BNXT_NEW_RM(bp)) {
674f50a5 6388 rx = hw_resc->resv_rx_rings;
c0b8cda0 6389 cp = hw_resc->resv_irqs;
674f50a5
MC
6390 grp = hw_resc->resv_hw_ring_grps;
6391 vnic = hw_resc->resv_vnics;
780baad4 6392 stat = hw_resc->resv_stat_ctxs;
674f50a5
MC
6393 }
6394
6395 rx_rings = rx;
6396 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6397 if (rx >= 2) {
6398 rx_rings = rx >> 1;
6399 } else {
6400 if (netif_running(bp->dev))
6401 return -ENOMEM;
6402
6403 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6404 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6405 bp->dev->hw_features &= ~NETIF_F_LRO;
6406 bp->dev->features &= ~NETIF_F_LRO;
6407 bnxt_set_ring_params(bp);
6408 }
6409 }
6410 rx_rings = min_t(int, rx_rings, grp);
780baad4
VV
6411 cp = min_t(int, cp, bp->cp_nr_rings);
6412 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6413 stat -= bnxt_get_ulp_stat_ctxs(bp);
6414 cp = min_t(int, cp, stat);
674f50a5
MC
6415 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6416 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6417 rx = rx_rings << 1;
6418 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6419 bp->tx_nr_rings = tx;
bd3191b5
MC
6420
6421 /* If we cannot reserve all the RX rings, reset the RSS map only
6422 * if absolutely necessary
6423 */
6424 if (rx_rings != bp->rx_nr_rings) {
6425 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6426 rx_rings, bp->rx_nr_rings);
6427 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6428 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6429 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6430 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6431 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6432 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6433 }
6434 }
674f50a5
MC
6435 bp->rx_nr_rings = rx_rings;
6436 bp->cp_nr_rings = cp;
6437
780baad4 6438 if (!tx || !rx || !cp || !grp || !vnic || !stat)
674f50a5
MC
6439 return -ENOMEM;
6440
5fa65524
EP
6441 if (!netif_is_rxfh_configured(bp->dev))
6442 bnxt_set_dflt_rss_indir_tbl(bp);
6443
391be5c2
MC
6444 return rc;
6445}
6446
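After trimming, __bnxt_reserve_rings() recomputes the vector count from the surviving TX/RX rings: with shared rings one vector serves a TX/RX pair, otherwise each ring gets its own. A one-function sketch of that formula:

/* cp = sh ? max(tx, rx) : tx + rx, as used when finalizing reservations. */
#include <stdio.h>

static int cp_rings_needed(int tx, int rx, int shared)
{
    return shared ? (tx > rx ? tx : rx) : tx + rx;
}

int main(void)
{
    printf("shared:   %d\n", cp_rings_needed(4, 6, 1));  /* 6 */
    printf("separate: %d\n", cp_rings_needed(4, 6, 0));  /* 10 */
    return 0;
}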
8f23d638 6447static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6448 int ring_grps, int cp_rings, int stats,
6449 int vnics)
98fdbe73 6450{
8f23d638 6451 struct hwrm_func_vf_cfg_input req = {0};
6fc2ffdf 6452 u32 flags;
98fdbe73 6453
f1ca94de 6454 if (!BNXT_NEW_RM(bp))
98fdbe73
MC
6455 return 0;
6456
6fc2ffdf 6457 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 6458 cp_rings, stats, vnics);
8f23d638
MC
6459 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6460 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6461 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638 6462 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
41e8d798
MC
6463 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6464 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6465 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6466 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8f23d638
MC
6467
6468 req.flags = cpu_to_le32(flags);
9f90445c
VV
6469 return hwrm_send_message_silent(bp, &req, sizeof(req),
6470 HWRM_CMD_TIMEOUT);
8f23d638
MC
6471}
6472
6473static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6474 int ring_grps, int cp_rings, int stats,
6475 int vnics)
8f23d638
MC
6476{
6477 struct hwrm_func_cfg_input req = {0};
6fc2ffdf 6478 u32 flags;
98fdbe73 6479
6fc2ffdf 6480 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 6481 cp_rings, stats, vnics);
8f23d638 6482 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
41e8d798 6483 if (BNXT_NEW_RM(bp)) {
8f23d638
MC
6484 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6485 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638
MC
6486 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6487 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
41e8d798 6488 if (bp->flags & BNXT_FLAG_CHIP_P5)
0b815023
MC
6489 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6490 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
41e8d798
MC
6491 else
6492 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6493 }
6fc2ffdf 6494
8f23d638 6495 req.flags = cpu_to_le32(flags);
9f90445c
VV
6496 return hwrm_send_message_silent(bp, &req, sizeof(req),
6497 HWRM_CMD_TIMEOUT);
98fdbe73
MC
6498}
6499
8f23d638 6500static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6501 int ring_grps, int cp_rings, int stats,
6502 int vnics)
8f23d638
MC
6503{
6504 if (bp->hwrm_spec_code < 0x10801)
6505 return 0;
6506
6507 if (BNXT_PF(bp))
6508 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
780baad4
VV
6509 ring_grps, cp_rings, stats,
6510 vnics);
8f23d638
MC
6511
6512 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
780baad4 6513 cp_rings, stats, vnics);
8f23d638
MC
6514}
6515
74706afa
MC
6516static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6517{
6518 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6519 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6520 struct hwrm_ring_aggint_qcaps_input req = {0};
6521 int rc;
6522
6523 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6524 coal_cap->num_cmpl_dma_aggr_max = 63;
6525 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6526 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6527 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6528 coal_cap->int_lat_tmr_min_max = 65535;
6529 coal_cap->int_lat_tmr_max_max = 65535;
6530 coal_cap->num_cmpl_aggr_int_max = 65535;
6531 coal_cap->timer_units = 80;
6532
6533 if (bp->hwrm_spec_code < 0x10902)
6534 return;
6535
6536 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6537 mutex_lock(&bp->hwrm_cmd_lock);
6538 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6539 if (!rc) {
6540 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
58590c8d 6541 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
74706afa
MC
6542 coal_cap->num_cmpl_dma_aggr_max =
6543 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6544 coal_cap->num_cmpl_dma_aggr_during_int_max =
6545 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6546 coal_cap->cmpl_aggr_dma_tmr_max =
6547 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6548 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6549 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6550 coal_cap->int_lat_tmr_min_max =
6551 le16_to_cpu(resp->int_lat_tmr_min_max);
6552 coal_cap->int_lat_tmr_max_max =
6553 le16_to_cpu(resp->int_lat_tmr_max_max);
6554 coal_cap->num_cmpl_aggr_int_max =
6555 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6556 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6557 }
6558 mutex_unlock(&bp->hwrm_cmd_lock);
6559}
6560
6561static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6562{
6563 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6564
6565 return usec * 1000 / coal_cap->timer_units;
6566}
6567
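Coalescing times are programmed in device timer ticks; bnxt_usec_to_coal_tmr() converts microseconds using the timer_units value reported by RING_AGGINT_QCAPS (80, apparently nanoseconds per tick, is the driver's fallback when the query is unsupported). The conversion in isolation:

/* usec -> device timer ticks: usec * 1000 / timer_units.  The 80 used
 * below is the driver's fallback timer_units value.
 */
#include <stdio.h>

static unsigned int usec_to_coal_tmr(unsigned int usec, unsigned int timer_units)
{
    return usec * 1000 / timer_units;
}

int main(void)
{
    printf("%u ticks\n", usec_to_coal_tmr(25, 80));   /* 25 us -> 312 ticks */
    return 0;
}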
6568static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6569 struct bnxt_coal *hw_coal,
bb053f52
MC
6570 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6571{
74706afa
MC
6572 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6573 u32 cmpl_params = coal_cap->cmpl_params;
6574 u16 val, tmr, max, flags = 0;
f8503969
MC
6575
6576 max = hw_coal->bufs_per_record * 128;
6577 if (hw_coal->budget)
6578 max = hw_coal->bufs_per_record * hw_coal->budget;
74706afa 6579 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
f8503969
MC
6580
6581 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6582 req->num_cmpl_aggr_int = cpu_to_le16(val);
b153cbc5 6583
74706afa 6584 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
f8503969
MC
6585 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6586
74706afa
MC
6587 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6588 coal_cap->num_cmpl_dma_aggr_during_int_max);
f8503969
MC
6589 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6590
74706afa
MC
6591 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6592 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
f8503969
MC
6593 req->int_lat_tmr_max = cpu_to_le16(tmr);
6594
6595 /* min timer set to 1/2 of interrupt timer */
74706afa
MC
6596 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6597 val = tmr / 2;
6598 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6599 req->int_lat_tmr_min = cpu_to_le16(val);
6600 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6601 }
f8503969
MC
6602
6603 /* buf timer set to 1/4 of interrupt timer */
74706afa 6604 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
f8503969
MC
6605 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6606
74706afa
MC
6607 if (cmpl_params &
6608 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6609 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6610 val = clamp_t(u16, tmr, 1,
6611 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6adc4601 6612 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
74706afa
MC
6613 req->enables |=
6614 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6615 }
f8503969 6616
74706afa
MC
6617 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6618 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6619 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6620 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
f8503969 6621 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
bb053f52 6622 req->flags = cpu_to_le16(flags);
74706afa 6623 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
bb053f52
MC
6624}
6625
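bnxt_hwrm_set_coal_params() derives its secondary timers from the main interrupt-latency timer: the minimum timer is half of it and the DMA aggregation (buffer) timer a quarter, each clamped to the limits reported by RING_AGGINT_QCAPS. A sketch of that derivation with assumed clamp bounds:

/* Derive the min and buf coalescing timers from the max latency timer
 * (1/2 and 1/4 respectively), clamped to assumed capability limits.
 */
#include <stdio.h>

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
    unsigned int tmr_max = 312;         /* ticks, e.g. from the usec conversion */
    unsigned int lat_min_max = 65535;   /* assumed capability limits */
    unsigned int dma_tmr_max = 65535;

    unsigned int tmr_min = clamp_u(tmr_max / 2, 1, lat_min_max);
    unsigned int dma_tmr = clamp_u(tmr_max / 4, 1, dma_tmr_max);

    printf("max=%u min=%u dma=%u\n", tmr_max, tmr_min, dma_tmr);
    return 0;
}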
58590c8d
MC
6626/* Caller holds bp->hwrm_cmd_lock */
6627static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6628 struct bnxt_coal *hw_coal)
6629{
6630 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6631 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6632 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6633 u32 nq_params = coal_cap->nq_params;
6634 u16 tmr;
6635
6636 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6637 return 0;
6638
6639 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6640 -1, -1);
6641 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6642 req.flags =
6643 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6644
6645 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6646 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6647 req.int_lat_tmr_min = cpu_to_le16(tmr);
6648 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6649 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6650}
6651
6a8788f2
AG
6652int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6653{
6654 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6655 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6656 struct bnxt_coal coal;
6a8788f2
AG
6657
6658 /* Tick values in microseconds.
6659 * 1 coal_buf x bufs_per_record = 1 completion record.
6660 */
6661 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6662
6663 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6664 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6665
6666 if (!bnapi->rx_ring)
6667 return -ENODEV;
6668
6669 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6670 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6671
74706afa 6672 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6a8788f2 6673
2c61d211 6674 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6a8788f2
AG
6675
6676 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6677 HWRM_CMD_TIMEOUT);
6678}
6679
c0c050c5
MC
6680int bnxt_hwrm_set_coal(struct bnxt *bp)
6681{
6682 int i, rc = 0;
dfc9c94a
MC
6683 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6684 req_tx = {0}, *req;
c0c050c5 6685
dfc9c94a
MC
6686 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6687 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6688 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6689 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
c0c050c5 6690
74706afa
MC
6691 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6692 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
c0c050c5
MC
6693
6694 mutex_lock(&bp->hwrm_cmd_lock);
6695 for (i = 0; i < bp->cp_nr_rings; i++) {
dfc9c94a 6696 struct bnxt_napi *bnapi = bp->bnapi[i];
58590c8d 6697 struct bnxt_coal *hw_coal;
2c61d211 6698 u16 ring_id;
c0c050c5 6699
dfc9c94a 6700 req = &req_rx;
2c61d211
MC
6701 if (!bnapi->rx_ring) {
6702 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
dfc9c94a 6703 req = &req_tx;
2c61d211
MC
6704 } else {
6705 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6706 }
6707 req->ring_id = cpu_to_le16(ring_id);
dfc9c94a
MC
6708
6709 rc = _hwrm_send_message(bp, req, sizeof(*req),
c0c050c5
MC
6710 HWRM_CMD_TIMEOUT);
6711 if (rc)
6712 break;
58590c8d
MC
6713
6714 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6715 continue;
6716
6717 if (bnapi->rx_ring && bnapi->tx_ring) {
6718 req = &req_tx;
6719 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6720 req->ring_id = cpu_to_le16(ring_id);
6721 rc = _hwrm_send_message(bp, req, sizeof(*req),
6722 HWRM_CMD_TIMEOUT);
6723 if (rc)
6724 break;
6725 }
6726 if (bnapi->rx_ring)
6727 hw_coal = &bp->rx_coal;
6728 else
6729 hw_coal = &bp->tx_coal;
6730 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
c0c050c5
MC
6731 }
6732 mutex_unlock(&bp->hwrm_cmd_lock);
6733 return rc;
6734}
6735
3d061591 6736static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
c0c050c5 6737{
c2dec363 6738 struct hwrm_stat_ctx_clr_stats_input req0 = {0};
c0c050c5 6739 struct hwrm_stat_ctx_free_input req = {0};
3d061591 6740 int i;
c0c050c5
MC
6741
6742 if (!bp->bnapi)
3d061591 6743 return;
c0c050c5 6744
3e8060fa 6745 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3d061591 6746 return;
3e8060fa 6747
c2dec363 6748 bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
c0c050c5
MC
6749 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6750
6751 mutex_lock(&bp->hwrm_cmd_lock);
6752 for (i = 0; i < bp->cp_nr_rings; i++) {
6753 struct bnxt_napi *bnapi = bp->bnapi[i];
6754 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6755
6756 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6757 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
c2dec363
MC
6758 if (BNXT_FW_MAJ(bp) <= 20) {
6759 req0.stat_ctx_id = req.stat_ctx_id;
6760 _hwrm_send_message(bp, &req0, sizeof(req0),
6761 HWRM_CMD_TIMEOUT);
6762 }
3d061591
VV
6763 _hwrm_send_message(bp, &req, sizeof(req),
6764 HWRM_CMD_TIMEOUT);
c0c050c5
MC
6765
6766 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6767 }
6768 }
6769 mutex_unlock(&bp->hwrm_cmd_lock);
c0c050c5
MC
6770}
6771
6772static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6773{
6774 int rc = 0, i;
6775 struct hwrm_stat_ctx_alloc_input req = {0};
6776 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6777
3e8060fa
PS
6778 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6779 return 0;
6780
c0c050c5
MC
6781 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6782
4e748506 6783 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
51f30785 6784 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
c0c050c5
MC
6785
6786 mutex_lock(&bp->hwrm_cmd_lock);
6787 for (i = 0; i < bp->cp_nr_rings; i++) {
6788 struct bnxt_napi *bnapi = bp->bnapi[i];
6789 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6790
177a6cde 6791 req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
c0c050c5
MC
6792
6793 rc = _hwrm_send_message(bp, &req, sizeof(req),
6794 HWRM_CMD_TIMEOUT);
6795 if (rc)
6796 break;
6797
6798 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6799
6800 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6801 }
6802 mutex_unlock(&bp->hwrm_cmd_lock);
89aa8445 6803 return rc;
c0c050c5
MC
6804}
6805
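/* Query the current function configuration (HWRM_FUNC_QCFG) and cache
 * the results: VF VLAN or PF registered-VF count, LLDP/DCBX agent and
 * ring monitor capabilities, multi-host and NPAR mode, bridge (EVB)
 * mode, maximum configured MTU and the doorbell BAR size.
 */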
cf6645f8
MC
6806static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6807{
6808 struct hwrm_func_qcfg_input req = {0};
567b2abe 6809 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8ae24738 6810 u32 min_db_offset = 0;
9315edca 6811 u16 flags;
cf6645f8
MC
6812 int rc;
6813
6814 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6815 req.fid = cpu_to_le16(0xffff);
6816 mutex_lock(&bp->hwrm_cmd_lock);
6817 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6818 if (rc)
6819 goto func_qcfg_exit;
6820
6821#ifdef CONFIG_BNXT_SRIOV
6822 if (BNXT_VF(bp)) {
cf6645f8
MC
6823 struct bnxt_vf_info *vf = &bp->vf;
6824
6825 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
230d1f0d
MC
6826 } else {
6827 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
cf6645f8
MC
6828 }
6829#endif
9315edca
MC
6830 flags = le16_to_cpu(resp->flags);
6831 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6832 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
97381a18 6833 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
9315edca 6834 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
97381a18 6835 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
9315edca
MC
6836 }
6837 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6838 bp->flags |= BNXT_FLAG_MULTI_HOST;
8d4bd96b
MC
6839 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6840 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
bc39f885 6841
567b2abe
SB
6842 switch (resp->port_partition_type) {
6843 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6844 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6845 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6846 bp->port_partition_type = resp->port_partition_type;
6847 break;
6848 }
32e8239c
MC
6849 if (bp->hwrm_spec_code < 0x10707 ||
6850 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6851 bp->br_mode = BRIDGE_MODE_VEB;
6852 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6853 bp->br_mode = BRIDGE_MODE_VEPA;
6854 else
6855 bp->br_mode = BRIDGE_MODE_UNDEF;
cf6645f8 6856
7eb9bb3a
MC
6857 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6858 if (!bp->max_mtu)
6859 bp->max_mtu = BNXT_MAX_MTU;
6860
8ae24738
MC
6861 if (bp->db_size)
6862 goto func_qcfg_exit;
6863
6864 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6865 if (BNXT_PF(bp))
6866 min_db_offset = DB_PF_OFFSET_P5;
6867 else
6868 min_db_offset = DB_VF_OFFSET_P5;
6869 }
6870 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6871 1024);
6872 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6873 bp->db_size <= min_db_offset)
6874 bp->db_size = pci_resource_len(bp->pdev, 2);
6875
cf6645f8
MC
6876func_qcfg_exit:
6877 mutex_unlock(&bp->hwrm_cmd_lock);
6878 return rc;
6879}
6880
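/* Capture the backing store initialization value, the per-type init
 * offsets (converted from 4-byte units) and the per-type entry sizes
 * reported by HWRM_FUNC_BACKING_STORE_QCAPS for later context memory
 * setup.  Types not flagged in ctx_init_mask get an init value of 0.
 */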
e9696ff3
MC
6881static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6882 struct hwrm_func_backing_store_qcaps_output *resp)
6883{
6884 struct bnxt_mem_init *mem_init;
41435c39 6885 u16 init_mask;
e9696ff3 6886 u8 init_val;
41435c39 6887 u8 *offset;
e9696ff3
MC
6888 int i;
6889
6890 init_val = resp->ctx_kind_initializer;
41435c39
MC
6891 init_mask = le16_to_cpu(resp->ctx_init_mask);
6892 offset = &resp->qp_init_offset;
6893 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6894 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
e9696ff3 6895 mem_init->init_val = init_val;
41435c39
MC
6896 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6897 if (!init_mask)
6898 continue;
6899 if (i == BNXT_CTX_MEM_INIT_STAT)
6900 offset = &resp->stat_init_offset;
6901 if (init_mask & (1 << i))
6902 mem_init->offset = *offset * 4;
6903 else
6904 mem_init->init_val = 0;
6905 }
6906 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6907 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6908 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6909 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6910 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6911 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
e9696ff3
MC
6912}
6913
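/* Query the firmware's context (backing store) memory requirements.
 * Skipped for VFs, older firmware (HWRM spec < 1.9.2) or when bp->ctx
 * is already populated.  On success, allocate bp->ctx and the per-TQM
 * ring page info array from the reported maximums and entry sizes.
 */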
98f04cf0
MC
6914static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6915{
6916 struct hwrm_func_backing_store_qcaps_input req = {0};
6917 struct hwrm_func_backing_store_qcaps_output *resp =
6918 bp->hwrm_cmd_resp_addr;
6919 int rc;
6920
6921 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6922 return 0;
6923
6924 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6925 mutex_lock(&bp->hwrm_cmd_lock);
6926 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6927 if (!rc) {
6928 struct bnxt_ctx_pg_info *ctx_pg;
6929 struct bnxt_ctx_mem_info *ctx;
ac3158cb 6930 int i, tqm_rings;
98f04cf0
MC
6931
6932 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6933 if (!ctx) {
6934 rc = -ENOMEM;
6935 goto ctx_err;
6936 }
98f04cf0
MC
6937 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6938 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6939 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6940 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6941 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6942 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6943 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6944 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6945 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6946 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6947 ctx->vnic_max_vnic_entries =
6948 le16_to_cpu(resp->vnic_max_vnic_entries);
6949 ctx->vnic_max_ring_table_entries =
6950 le16_to_cpu(resp->vnic_max_ring_table_entries);
6951 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6952 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6953 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6954 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6955 ctx->tqm_min_entries_per_ring =
6956 le32_to_cpu(resp->tqm_min_entries_per_ring);
6957 ctx->tqm_max_entries_per_ring =
6958 le32_to_cpu(resp->tqm_max_entries_per_ring);
6959 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6960 if (!ctx->tqm_entries_multiple)
6961 ctx->tqm_entries_multiple = 1;
6962 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6963 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
53579e37
DS
6964 ctx->mrav_num_entries_units =
6965 le16_to_cpu(resp->mrav_num_entries_units);
98f04cf0
MC
6966 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6967 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
e9696ff3
MC
6968
6969 bnxt_init_ctx_initializer(ctx, resp);
6970
ac3158cb
MC
6971 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6972 if (!ctx->tqm_fp_rings_count)
6973 ctx->tqm_fp_rings_count = bp->max_q;
a029a2fe
MC
6974 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6975 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
ac3158cb 6976
a029a2fe 6977 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ac3158cb
MC
6978 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6979 if (!ctx_pg) {
6980 kfree(ctx);
6981 rc = -ENOMEM;
6982 goto ctx_err;
6983 }
6984 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6985 ctx->tqm_mem[i] = ctx_pg;
6986 bp->ctx = ctx;
98f04cf0
MC
6987 } else {
6988 rc = 0;
6989 }
6990ctx_err:
6991 mutex_unlock(&bp->hwrm_cmd_lock);
6992 return rc;
6993}
6994
1b9394e5
MC
6995static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6996 __le64 *pg_dir)
6997{
be6d755f
EP
6998 if (!rmem->nr_pages)
6999 return;
7000
702279d2 7001 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
08fe9d18
MC
7002 if (rmem->depth >= 1) {
7003 if (rmem->depth == 2)
7004 *pg_attr |= 2;
7005 else
7006 *pg_attr |= 1;
1b9394e5
MC
7007 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
7008 } else {
7009 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
7010 }
7011}
7012
7013#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
7014 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
7015 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
7016 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
7017 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
7018 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7019
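/* Send HWRM_FUNC_BACKING_STORE_CFG to tell firmware where each enabled
 * context type (QP, SRQ, CQ, VNIC, stats, MRAV, TIM and the TQM rings)
 * lives in host memory: entry counts, entry sizes, page level and page
 * directory addresses.  Falls back to the legacy request length if the
 * firmware cannot accept the extended request.
 */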
7020static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7021{
7022 struct hwrm_func_backing_store_cfg_input req = {0};
7023 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7024 struct bnxt_ctx_pg_info *ctx_pg;
16db6323 7025 u32 req_len = sizeof(req);
1b9394e5
MC
7026 __le32 *num_entries;
7027 __le64 *pg_dir;
53579e37 7028 u32 flags = 0;
1b9394e5 7029 u8 *pg_attr;
1b9394e5 7030 u32 ena;
9f90445c 7031 int i;
1b9394e5
MC
7032
7033 if (!ctx)
7034 return 0;
7035
16db6323
MC
7036 if (req_len > bp->hwrm_max_ext_req_len)
7037 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
1b9394e5
MC
7038 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
7039 req.enables = cpu_to_le32(enables);
7040
7041 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7042 ctx_pg = &ctx->qp_mem;
7043 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
7044 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7045 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7046 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7047 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7048 &req.qpc_pg_size_qpc_lvl,
7049 &req.qpc_page_dir);
7050 }
7051 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7052 ctx_pg = &ctx->srq_mem;
7053 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
7054 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7055 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7056 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7057 &req.srq_pg_size_srq_lvl,
7058 &req.srq_page_dir);
7059 }
7060 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7061 ctx_pg = &ctx->cq_mem;
7062 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
7063 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7064 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7065 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
7066 &req.cq_page_dir);
7067 }
7068 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7069 ctx_pg = &ctx->vnic_mem;
7070 req.vnic_num_vnic_entries =
7071 cpu_to_le16(ctx->vnic_max_vnic_entries);
7072 req.vnic_num_ring_table_entries =
7073 cpu_to_le16(ctx->vnic_max_ring_table_entries);
7074 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7075 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7076 &req.vnic_pg_size_vnic_lvl,
7077 &req.vnic_page_dir);
7078 }
7079 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7080 ctx_pg = &ctx->stat_mem;
7081 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7082 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7083 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7084 &req.stat_pg_size_stat_lvl,
7085 &req.stat_page_dir);
7086 }
cf6daed0
MC
7087 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7088 ctx_pg = &ctx->mrav_mem;
7089 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
53579e37
DS
7090 if (ctx->mrav_num_entries_units)
7091 flags |=
7092 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
cf6daed0
MC
7093 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7094 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7095 &req.mrav_pg_size_mrav_lvl,
7096 &req.mrav_page_dir);
7097 }
7098 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7099 ctx_pg = &ctx->tim_mem;
7100 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
7101 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7102 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7103 &req.tim_pg_size_tim_lvl,
7104 &req.tim_page_dir);
7105 }
1b9394e5
MC
7106 for (i = 0, num_entries = &req.tqm_sp_num_entries,
7107 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
7108 pg_dir = &req.tqm_sp_page_dir,
7109 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
a029a2fe
MC
7110 i < BNXT_MAX_TQM_RINGS;
7111 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
1b9394e5
MC
7112 if (!(enables & ena))
7113 continue;
7114
7115 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7116 ctx_pg = ctx->tqm_mem[i];
7117 *num_entries = cpu_to_le32(ctx_pg->entries);
7118 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7119 }
53579e37 7120 req.flags = cpu_to_le32(flags);
16db6323 7121 return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
1b9394e5
MC
7122}
7123
98f04cf0 7124static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
08fe9d18 7125 struct bnxt_ctx_pg_info *ctx_pg)
98f04cf0
MC
7126{
7127 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7128
98f04cf0
MC
7129 rmem->page_size = BNXT_PAGE_SIZE;
7130 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7131 rmem->dma_arr = ctx_pg->ctx_dma_arr;
1b9394e5 7132 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
08fe9d18
MC
7133 if (rmem->depth >= 1)
7134 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
98f04cf0
MC
7135 return bnxt_alloc_ring(bp, rmem);
7136}
7137
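/* Allocate the host pages backing one context memory region.  Small
 * regions use a single level of pages; regions larger than
 * MAX_CTX_PAGES (or callers requesting depth > 1) get a two-level
 * layout with an array of page-table blocks, each covering up to
 * MAX_CTX_PAGES pages.
 */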
08fe9d18
MC
7138static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7139 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
e9696ff3 7140 u8 depth, struct bnxt_mem_init *mem_init)
08fe9d18
MC
7141{
7142 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7143 int rc;
7144
7145 if (!mem_size)
bbf211b1 7146 return -EINVAL;
08fe9d18
MC
7147
7148 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7149 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7150 ctx_pg->nr_pages = 0;
7151 return -EINVAL;
7152 }
7153 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7154 int nr_tbls, i;
7155
7156 rmem->depth = 2;
7157 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7158 GFP_KERNEL);
7159 if (!ctx_pg->ctx_pg_tbl)
7160 return -ENOMEM;
7161 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7162 rmem->nr_pages = nr_tbls;
7163 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7164 if (rc)
7165 return rc;
7166 for (i = 0; i < nr_tbls; i++) {
7167 struct bnxt_ctx_pg_info *pg_tbl;
7168
7169 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7170 if (!pg_tbl)
7171 return -ENOMEM;
7172 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7173 rmem = &pg_tbl->ring_mem;
7174 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7175 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7176 rmem->depth = 1;
7177 rmem->nr_pages = MAX_CTX_PAGES;
e9696ff3 7178 rmem->mem_init = mem_init;
6ef982de
MC
7179 if (i == (nr_tbls - 1)) {
7180 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7181
7182 if (rem)
7183 rmem->nr_pages = rem;
7184 }
08fe9d18
MC
7185 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7186 if (rc)
7187 break;
7188 }
7189 } else {
7190 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7191 if (rmem->nr_pages > 1 || depth)
7192 rmem->depth = 1;
e9696ff3 7193 rmem->mem_init = mem_init;
08fe9d18
MC
7194 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7195 }
7196 return rc;
7197}
7198
7199static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7200 struct bnxt_ctx_pg_info *ctx_pg)
7201{
7202 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7203
7204 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7205 ctx_pg->ctx_pg_tbl) {
7206 int i, nr_tbls = rmem->nr_pages;
7207
7208 for (i = 0; i < nr_tbls; i++) {
7209 struct bnxt_ctx_pg_info *pg_tbl;
7210 struct bnxt_ring_mem_info *rmem2;
7211
7212 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7213 if (!pg_tbl)
7214 continue;
7215 rmem2 = &pg_tbl->ring_mem;
7216 bnxt_free_ring(bp, rmem2);
7217 ctx_pg->ctx_pg_arr[i] = NULL;
7218 kfree(pg_tbl);
7219 ctx_pg->ctx_pg_tbl[i] = NULL;
7220 }
7221 kfree(ctx_pg->ctx_pg_tbl);
7222 ctx_pg->ctx_pg_tbl = NULL;
7223 }
7224 bnxt_free_ring(bp, rmem);
7225 ctx_pg->nr_pages = 0;
7226}
7227
98f04cf0
MC
7228static void bnxt_free_ctx_mem(struct bnxt *bp)
7229{
7230 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7231 int i;
7232
7233 if (!ctx)
7234 return;
7235
7236 if (ctx->tqm_mem[0]) {
ac3158cb 7237 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
08fe9d18 7238 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
98f04cf0
MC
7239 kfree(ctx->tqm_mem[0]);
7240 ctx->tqm_mem[0] = NULL;
7241 }
7242
cf6daed0
MC
7243 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7244 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
08fe9d18
MC
7245 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7246 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7247 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7248 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7249 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
98f04cf0
MC
7250 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7251}
7252
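/* Size and allocate all firmware backing store regions (QP, SRQ, CQ,
 * VNIC, stats and, when RoCE is supported, MRAV and TIM), plus the TQM
 * slow-path and fast-path rings, then push the layout to firmware with
 * HWRM_FUNC_BACKING_STORE_CFG.  Extra QP/SRQ entries are reserved when
 * the RoCE capability is present and this is not a kdump kernel.
 */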
7253static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7254{
7255 struct bnxt_ctx_pg_info *ctx_pg;
7256 struct bnxt_ctx_mem_info *ctx;
e9696ff3 7257 struct bnxt_mem_init *init;
1b9394e5 7258 u32 mem_size, ena, entries;
c7dd7ab4 7259 u32 entries_sp, min;
53579e37 7260 u32 num_mr, num_ah;
cf6daed0
MC
7261 u32 extra_srqs = 0;
7262 u32 extra_qps = 0;
7263 u8 pg_lvl = 1;
98f04cf0
MC
7264 int i, rc;
7265
7266 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7267 if (rc) {
7268 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7269 rc);
7270 return rc;
7271 }
7272 ctx = bp->ctx;
7273 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7274 return 0;
7275
d629522e 7276 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
cf6daed0
MC
7277 pg_lvl = 2;
7278 extra_qps = 65536;
7279 extra_srqs = 8192;
7280 }
7281
98f04cf0 7282 ctx_pg = &ctx->qp_mem;
cf6daed0
MC
7283 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7284 extra_qps;
be6d755f
EP
7285 if (ctx->qp_entry_size) {
7286 mem_size = ctx->qp_entry_size * ctx_pg->entries;
e9696ff3
MC
7287 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7288 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7289 if (rc)
7290 return rc;
7291 }
98f04cf0
MC
7292
7293 ctx_pg = &ctx->srq_mem;
cf6daed0 7294 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
be6d755f
EP
7295 if (ctx->srq_entry_size) {
7296 mem_size = ctx->srq_entry_size * ctx_pg->entries;
e9696ff3
MC
7297 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7298 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7299 if (rc)
7300 return rc;
7301 }
98f04cf0
MC
7302
7303 ctx_pg = &ctx->cq_mem;
cf6daed0 7304 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
be6d755f
EP
7305 if (ctx->cq_entry_size) {
7306 mem_size = ctx->cq_entry_size * ctx_pg->entries;
e9696ff3
MC
7307 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7308 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7309 if (rc)
7310 return rc;
7311 }
98f04cf0
MC
7312
7313 ctx_pg = &ctx->vnic_mem;
7314 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7315 ctx->vnic_max_ring_table_entries;
be6d755f
EP
7316 if (ctx->vnic_entry_size) {
7317 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
e9696ff3
MC
7318 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7319 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
be6d755f
EP
7320 if (rc)
7321 return rc;
7322 }
98f04cf0
MC
7323
7324 ctx_pg = &ctx->stat_mem;
7325 ctx_pg->entries = ctx->stat_max_entries;
be6d755f
EP
7326 if (ctx->stat_entry_size) {
7327 mem_size = ctx->stat_entry_size * ctx_pg->entries;
e9696ff3
MC
7328 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7329 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
be6d755f
EP
7330 if (rc)
7331 return rc;
7332 }
98f04cf0 7333
cf6daed0
MC
7334 ena = 0;
7335 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7336 goto skip_rdma;
7337
7338 ctx_pg = &ctx->mrav_mem;
53579e37
DS
7339 /* 128K extra is needed to accommodate static AH context
7340 * allocation by f/w.
7341 */
7342 num_mr = 1024 * 256;
7343 num_ah = 1024 * 128;
7344 ctx_pg->entries = num_mr + num_ah;
be6d755f
EP
7345 if (ctx->mrav_entry_size) {
7346 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
e9696ff3
MC
7347 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7348 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
be6d755f
EP
7349 if (rc)
7350 return rc;
7351 }
cf6daed0 7352 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
53579e37
DS
7353 if (ctx->mrav_num_entries_units)
7354 ctx_pg->entries =
7355 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7356 (num_ah / ctx->mrav_num_entries_units);
cf6daed0
MC
7357
7358 ctx_pg = &ctx->tim_mem;
7359 ctx_pg->entries = ctx->qp_mem.entries;
be6d755f
EP
7360 if (ctx->tim_entry_size) {
7361 mem_size = ctx->tim_entry_size * ctx_pg->entries;
e9696ff3 7362 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
be6d755f
EP
7363 if (rc)
7364 return rc;
7365 }
cf6daed0
MC
7366 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7367
7368skip_rdma:
c7dd7ab4
MC
7369 min = ctx->tqm_min_entries_per_ring;
7370 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7371 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7372 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
c12e1643 7373 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
98f04cf0 7374 entries = roundup(entries, ctx->tqm_entries_multiple);
c7dd7ab4 7375 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
ac3158cb 7376 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
98f04cf0 7377 ctx_pg = ctx->tqm_mem[i];
c7dd7ab4 7378 ctx_pg->entries = i ? entries : entries_sp;
be6d755f
EP
7379 if (ctx->tqm_entry_size) {
7380 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7381 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
e9696ff3 7382 NULL);
be6d755f
EP
7383 if (rc)
7384 return rc;
7385 }
1b9394e5 7386 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
98f04cf0 7387 }
1b9394e5
MC
7388 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7389 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
0b5b561c 7390 if (rc) {
1b9394e5
MC
7391 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7392 rc);
0b5b561c
MC
7393 return rc;
7394 }
7395 ctx->flags |= BNXT_CTX_FLAG_INITED;
98f04cf0
MC
7396 return 0;
7397}
7398
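/* Query min/max resource counts (rings, ring groups, L2 contexts,
 * VNICs, stat contexts) via HWRM_FUNC_RESOURCE_QCAPS.  When "all" is
 * false only max_tx_sch_inputs is refreshed.  On P5 chips the MSI-X
 * maximum caps the NQs and ring groups track the RX ring limit.
 */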
db4723b3 7399int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
be0dd9c4
MC
7400{
7401 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7402 struct hwrm_func_resource_qcaps_input req = {0};
7403 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7404 int rc;
7405
7406 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
7407 req.fid = cpu_to_le16(0xffff);
7408
7409 mutex_lock(&bp->hwrm_cmd_lock);
351cbde9
JT
7410 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
7411 HWRM_CMD_TIMEOUT);
d4f1420d 7412 if (rc)
be0dd9c4 7413 goto hwrm_func_resc_qcaps_exit;
be0dd9c4 7414
db4723b3
MC
7415 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7416 if (!all)
7417 goto hwrm_func_resc_qcaps_exit;
7418
be0dd9c4
MC
7419 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7420 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7421 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7422 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7423 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7424 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7425 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7426 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7427 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7428 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7429 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7430 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7431 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7432 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7433 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7434 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7435
9c1fabdf
MC
7436 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7437 u16 max_msix = le16_to_cpu(resp->max_msix);
7438
f7588cd8 7439 hw_resc->max_nqs = max_msix;
9c1fabdf
MC
7440 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7441 }
7442
4673d664
MC
7443 if (BNXT_PF(bp)) {
7444 struct bnxt_pf_info *pf = &bp->pf;
7445
7446 pf->vf_resv_strategy =
7447 le16_to_cpu(resp->vf_reservation_strategy);
bf82736d 7448 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
4673d664
MC
7449 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7450 }
be0dd9c4
MC
7451hwrm_func_resc_qcaps_exit:
7452 mutex_unlock(&bp->hwrm_cmd_lock);
7453 return rc;
7454}
7455
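/* Query PTP support for the port (HWRM_PORT_MAC_PTP_QCFG).  If the
 * firmware reports HWRM or direct register access to the PHC, allocate
 * bp->ptp_cfg, record the reference clock registers and initialize the
 * PTP clock; otherwise tear down any previous PTP state.
 */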
ae5c42f0
MC
7456/* bp->hwrm_cmd_lock already held. */
7457static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7458{
7459 struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7460 struct hwrm_port_mac_ptp_qcfg_input req = {0};
7461 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7462 u8 flags;
7463 int rc;
7464
7465 if (bp->hwrm_spec_code < 0x10801) {
7466 rc = -ENODEV;
7467 goto no_ptp;
7468 }
7469
7470 req.port_id = cpu_to_le16(bp->pf.port_id);
7471 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1);
7472 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7473 if (rc)
7474 goto no_ptp;
7475
7476 flags = resp->flags;
7477 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7478 rc = -ENODEV;
7479 goto no_ptp;
7480 }
7481 if (!ptp) {
7482 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7483 if (!ptp)
7484 return -ENOMEM;
7485 ptp->bp = bp;
7486 bp->ptp_cfg = ptp;
7487 }
7488 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7489 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7490 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7491 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7492 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7493 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7494 } else {
7495 rc = -ENODEV;
7496 goto no_ptp;
7497 }
a521c8a0
MC
7498 rc = bnxt_ptp_init(bp);
7499 if (!rc)
7500 return 0;
7501
7502 netdev_warn(bp->dev, "PTP initialization failed.\n");
ae5c42f0
MC
7503
7504no_ptp:
a521c8a0 7505 bnxt_ptp_clear(bp);
ae5c42f0
MC
7506 kfree(ptp);
7507 bp->ptp_cfg = NULL;
7508 return rc;
7509}
7510
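/* Query function capabilities (HWRM_FUNC_QCAPS): RoCE, statistics,
 * error recovery and VLAN TX insert support are translated into
 * bp->flags / bp->fw_cap, resource maximums are cached in bp->hw_resc,
 * and for a PF the MAC address, VF range, flow limits, WoL and PTP
 * support are recorded as well.
 */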
be0dd9c4 7511static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5
MC
7512{
7513 int rc = 0;
7514 struct hwrm_func_qcaps_input req = {0};
7515 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6a4f2947 7516 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
1da63ddd 7517 u32 flags, flags_ext;
c0c050c5
MC
7518
7519 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
7520 req.fid = cpu_to_le16(0xffff);
7521
7522 mutex_lock(&bp->hwrm_cmd_lock);
7523 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7524 if (rc)
7525 goto hwrm_func_qcaps_exit;
7526
6a4f2947
MC
7527 flags = le32_to_cpu(resp->flags);
7528 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
e4060d30 7529 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6a4f2947 7530 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
e4060d30 7531 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
55e4398d
VV
7532 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7533 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
0a3f4e4f
VV
7534 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7535 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
6154532f
VV
7536 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7537 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
07f83d72
MC
7538 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7539 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
4037eb71
VV
7540 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7541 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
1da63ddd
EP
7542 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7543 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7544
7545 flags_ext = le32_to_cpu(resp->flags_ext);
7546 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7547 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
e4060d30 7548
7cc5a20e 7549 bp->tx_push_thresh = 0;
fed7edd1
MC
7550 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7551 BNXT_FW_MAJ(bp) > 217)
7cc5a20e
MC
7552 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7553
6a4f2947
MC
7554 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7555 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7556 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7557 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7558 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7559 if (!hw_resc->max_hw_ring_grps)
7560 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7561 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7562 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7563 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7564
c0c050c5
MC
7565 if (BNXT_PF(bp)) {
7566 struct bnxt_pf_info *pf = &bp->pf;
7567
7568 pf->fw_fid = le16_to_cpu(resp->fid);
7569 pf->port_id = le16_to_cpu(resp->port_id);
11f15ed3 7570 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
c0c050c5
MC
7571 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7572 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7573 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7574 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7575 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7576 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7577 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7578 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
ba642ab7 7579 bp->flags &= ~BNXT_FLAG_WOL_CAP;
6a4f2947 7580 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
c1ef146a 7581 bp->flags |= BNXT_FLAG_WOL_CAP;
de5bf194 7582 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
ae5c42f0 7583 __bnxt_hwrm_ptp_qcfg(bp);
de5bf194 7584 } else {
a521c8a0 7585 bnxt_ptp_clear(bp);
de5bf194
MC
7586 kfree(bp->ptp_cfg);
7587 bp->ptp_cfg = NULL;
7588 }
c0c050c5 7589 } else {
379a80a1 7590#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
7591 struct bnxt_vf_info *vf = &bp->vf;
7592
7593 vf->fw_fid = le16_to_cpu(resp->fid);
7cc5a20e 7594 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
379a80a1 7595#endif
c0c050c5
MC
7596 }
7597
c0c050c5
MC
7598hwrm_func_qcaps_exit:
7599 mutex_unlock(&bp->hwrm_cmd_lock);
7600 return rc;
7601}
7602
804fba4e
MC
7603static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7604
be0dd9c4
MC
7605static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7606{
7607 int rc;
7608
7609 rc = __bnxt_hwrm_func_qcaps(bp);
7610 if (rc)
7611 return rc;
804fba4e
MC
7612 rc = bnxt_hwrm_queue_qportcfg(bp);
7613 if (rc) {
7614 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7615 return rc;
7616 }
be0dd9c4 7617 if (bp->hwrm_spec_code >= 0x10803) {
98f04cf0
MC
7618 rc = bnxt_alloc_ctx_mem(bp);
7619 if (rc)
7620 return rc;
db4723b3 7621 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
be0dd9c4 7622 if (!rc)
97381a18 7623 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
be0dd9c4
MC
7624 }
7625 return 0;
7626}
7627
e969ae5b
MC
7628static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7629{
7630 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7631 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7632 int rc = 0;
7633 u32 flags;
7634
7635 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7636 return 0;
7637
7638 resp = bp->hwrm_cmd_resp_addr;
7639 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7640
7641 mutex_lock(&bp->hwrm_cmd_lock);
7642 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7643 if (rc)
7644 goto hwrm_cfa_adv_qcaps_exit;
7645
7646 flags = le32_to_cpu(resp->flags);
7647 if (flags &
41136ab3
MC
7648 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7649 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
e969ae5b
MC
7650
7651hwrm_cfa_adv_qcaps_exit:
7652 mutex_unlock(&bp->hwrm_cmd_lock);
7653 return rc;
7654}
7655
3e9ec2bb
EP
7656static int __bnxt_alloc_fw_health(struct bnxt *bp)
7657{
7658 if (bp->fw_health)
7659 return 0;
7660
7661 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7662 if (!bp->fw_health)
7663 return -ENOMEM;
7664
7665 return 0;
7666}
7667
7668static int bnxt_alloc_fw_health(struct bnxt *bp)
7669{
7670 int rc;
7671
7672 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7673 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7674 return 0;
7675
7676 rc = __bnxt_alloc_fw_health(bp);
7677 if (rc) {
7678 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7679 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7680 return rc;
7681 }
7682
7683 return 0;
7684}
7685
ba02629f
EP
7686static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7687{
7688 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7689 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7690 BNXT_FW_HEALTH_WIN_MAP_OFF);
7691}
7692
80a9641f
PC
7693bool bnxt_is_fw_healthy(struct bnxt *bp)
7694{
7695 if (bp->fw_health && bp->fw_health->status_reliable) {
7696 u32 fw_status;
7697
7698 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7699 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7700 return false;
7701 }
7702
7703 return true;
7704}
7705
43a440c4
MC
7706static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7707{
7708 struct bnxt_fw_health *fw_health = bp->fw_health;
7709 u32 reg_type;
7710
7711 if (!fw_health || !fw_health->status_reliable)
7712 return;
7713
7714 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7715 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7716 fw_health->status_reliable = false;
7717}
7718
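/* Locate the firmware health status register.  The hcomm status
 * structure is mapped through a GRC window and its signature checked;
 * if it is absent, fall back to the fixed status location on P5 chips.
 * The register is then mapped (if GRC based) and marked reliable.
 */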
ba02629f
EP
7719static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7720{
7721 void __iomem *hs;
7722 u32 status_loc;
7723 u32 reg_type;
7724 u32 sig;
7725
43a440c4
MC
7726 if (bp->fw_health)
7727 bp->fw_health->status_reliable = false;
7728
ba02629f
EP
7729 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7730 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7731
7732 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7733 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
d1cbd165
MC
7734 if (!bp->chip_num) {
7735 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7736 bp->chip_num = readl(bp->bar0 +
7737 BNXT_FW_HEALTH_WIN_BASE +
7738 BNXT_GRC_REG_CHIP_NUM);
7739 }
43a440c4 7740 if (!BNXT_CHIP_P5(bp))
d1cbd165 7741 return;
43a440c4 7742
d1cbd165
MC
7743 status_loc = BNXT_GRC_REG_STATUS_P5 |
7744 BNXT_FW_HEALTH_REG_TYPE_BAR0;
7745 } else {
7746 status_loc = readl(hs + offsetof(struct hcomm_status,
7747 fw_status_loc));
ba02629f
EP
7748 }
7749
7750 if (__bnxt_alloc_fw_health(bp)) {
7751 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7752 return;
7753 }
7754
ba02629f
EP
7755 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7756 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7757 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7758 __bnxt_map_fw_health_reg(bp, status_loc);
7759 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7760 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7761 }
7762
7763 bp->fw_health->status_reliable = true;
7764}
7765
9ffbd677
MC
7766static int bnxt_map_fw_health_regs(struct bnxt *bp)
7767{
7768 struct bnxt_fw_health *fw_health = bp->fw_health;
7769 u32 reg_base = 0xffffffff;
7770 int i;
7771
43a440c4 7772 bp->fw_health->status_reliable = false;
9ffbd677
MC
7773 /* Only pre-map the monitoring GRC registers using window 3 */
7774 for (i = 0; i < 4; i++) {
7775 u32 reg = fw_health->regs[i];
7776
7777 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7778 continue;
7779 if (reg_base == 0xffffffff)
7780 reg_base = reg & BNXT_GRC_BASE_MASK;
7781 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7782 return -ERANGE;
ba02629f 7783 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9ffbd677 7784 }
43a440c4 7785 bp->fw_health->status_reliable = true;
9ffbd677
MC
7786 if (reg_base == 0xffffffff)
7787 return 0;
7788
ba02629f 7789 __bnxt_map_fw_health_reg(bp, reg_base);
9ffbd677
MC
7790 return 0;
7791}
7792
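/* Retrieve the firmware error recovery parameters
 * (HWRM_ERROR_RECOVERY_QCFG): polling interval, wait times, the health,
 * heartbeat, reset count and reset-in-progress registers, and the
 * register write sequence used to reset the chip.  The GRC registers
 * are then pre-mapped; on any failure the capability flag is cleared.
 */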
07f83d72
MC
7793static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7794{
7795 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7796 struct bnxt_fw_health *fw_health = bp->fw_health;
7797 struct hwrm_error_recovery_qcfg_input req = {0};
7798 int rc, i;
7799
7800 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7801 return 0;
7802
7803 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7804 mutex_lock(&bp->hwrm_cmd_lock);
7805 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7806 if (rc)
7807 goto err_recovery_out;
07f83d72
MC
7808 fw_health->flags = le32_to_cpu(resp->flags);
7809 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7810 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7811 rc = -EINVAL;
7812 goto err_recovery_out;
7813 }
7814 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7815 fw_health->master_func_wait_dsecs =
7816 le32_to_cpu(resp->master_func_wait_period);
7817 fw_health->normal_func_wait_dsecs =
7818 le32_to_cpu(resp->normal_func_wait_period);
7819 fw_health->post_reset_wait_dsecs =
7820 le32_to_cpu(resp->master_func_wait_period_after_reset);
7821 fw_health->post_reset_max_wait_dsecs =
7822 le32_to_cpu(resp->max_bailout_time_after_reset);
7823 fw_health->regs[BNXT_FW_HEALTH_REG] =
7824 le32_to_cpu(resp->fw_health_status_reg);
7825 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7826 le32_to_cpu(resp->fw_heartbeat_reg);
7827 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7828 le32_to_cpu(resp->fw_reset_cnt_reg);
7829 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7830 le32_to_cpu(resp->reset_inprogress_reg);
7831 fw_health->fw_reset_inprog_reg_mask =
7832 le32_to_cpu(resp->reset_inprogress_reg_mask);
7833 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7834 if (fw_health->fw_reset_seq_cnt >= 16) {
7835 rc = -EINVAL;
7836 goto err_recovery_out;
7837 }
7838 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7839 fw_health->fw_reset_seq_regs[i] =
7840 le32_to_cpu(resp->reset_reg[i]);
7841 fw_health->fw_reset_seq_vals[i] =
7842 le32_to_cpu(resp->reset_reg_val[i]);
7843 fw_health->fw_reset_seq_delay_msec[i] =
7844 resp->delay_after_reset[i];
7845 }
7846err_recovery_out:
7847 mutex_unlock(&bp->hwrm_cmd_lock);
9ffbd677
MC
7848 if (!rc)
7849 rc = bnxt_map_fw_health_regs(bp);
07f83d72
MC
7850 if (rc)
7851 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7852 return rc;
7853}
7854
c0c050c5
MC
7855static int bnxt_hwrm_func_reset(struct bnxt *bp)
7856{
7857 struct hwrm_func_reset_input req = {0};
7858
7859 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7860 req.enables = 0;
7861
7862 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7863}
7864
4933f675
VV
7865static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7866{
7867 struct hwrm_nvm_get_dev_info_output nvm_info;
7868
7869 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7870 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7871 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7872 nvm_info.nvm_cfg_ver_upd);
7873}
7874
c0c050c5
MC
7875static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7876{
7877 int rc = 0;
7878 struct hwrm_queue_qportcfg_input req = {0};
7879 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
aabfc016
MC
7880 u8 i, j, *qptr;
7881 bool no_rdma;
c0c050c5
MC
7882
7883 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7884
7885 mutex_lock(&bp->hwrm_cmd_lock);
7886 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7887 if (rc)
7888 goto qportcfg_exit;
7889
7890 if (!resp->max_configurable_queues) {
7891 rc = -EINVAL;
7892 goto qportcfg_exit;
7893 }
7894 bp->max_tc = resp->max_configurable_queues;
87c374de 7895 bp->max_lltc = resp->max_configurable_lossless_queues;
c0c050c5
MC
7896 if (bp->max_tc > BNXT_MAX_QUEUE)
7897 bp->max_tc = BNXT_MAX_QUEUE;
7898
aabfc016
MC
7899 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7900 qptr = &resp->queue_id0;
7901 for (i = 0, j = 0; i < bp->max_tc; i++) {
98f04cf0
MC
7902 bp->q_info[j].queue_id = *qptr;
7903 bp->q_ids[i] = *qptr++;
aabfc016
MC
7904 bp->q_info[j].queue_profile = *qptr++;
7905 bp->tc_to_qidx[j] = j;
7906 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7907 (no_rdma && BNXT_PF(bp)))
7908 j++;
7909 }
98f04cf0 7910 bp->max_q = bp->max_tc;
aabfc016
MC
7911 bp->max_tc = max_t(u8, j, 1);
7912
441cabbb
MC
7913 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7914 bp->max_tc = 1;
7915
87c374de
MC
7916 if (bp->max_lltc > bp->max_tc)
7917 bp->max_lltc = bp->max_tc;
7918
c0c050c5
MC
7919qportcfg_exit:
7920 mutex_unlock(&bp->hwrm_cmd_lock);
7921 return rc;
7922}
7923
ba642ab7 7924static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
c0c050c5 7925{
c0c050c5 7926 struct hwrm_ver_get_input req = {0};
ba642ab7 7927 int rc;
c0c050c5
MC
7928
7929 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7930 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7931 req.hwrm_intf_min = HWRM_VERSION_MINOR;
7932 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
ba642ab7
MC
7933
7934 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7935 silent);
7936 return rc;
7937}
7938
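/* Query firmware and HWRM interface versions.  The response is cached
 * in bp->ver_resp and used to build the version strings, pick the HWRM
 * command timeout and maximum request lengths, detect Nitro A0, and
 * set capability bits such as short command, Kong mailbox channel,
 * 64-bit flow handles, trusted VF and advanced flow management.
 */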
7939static int bnxt_hwrm_ver_get(struct bnxt *bp)
7940{
7941 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
d0ad2ea2 7942 u16 fw_maj, fw_min, fw_bld, fw_rsv;
b7a444f0 7943 u32 dev_caps_cfg, hwrm_ver;
d0ad2ea2 7944 int rc, len;
ba642ab7
MC
7945
7946 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
c0c050c5 7947 mutex_lock(&bp->hwrm_cmd_lock);
ba642ab7 7948 rc = __bnxt_hwrm_ver_get(bp, false);
c0c050c5
MC
7949 if (rc)
7950 goto hwrm_ver_get_exit;
7951
7952 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7953
894aa69a
MC
7954 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7955 resp->hwrm_intf_min_8b << 8 |
7956 resp->hwrm_intf_upd_8b;
7957 if (resp->hwrm_intf_maj_8b < 1) {
c193554e 7958 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
894aa69a
MC
7959 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7960 resp->hwrm_intf_upd_8b);
c193554e 7961 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5 7962 }
b7a444f0
VV
7963
7964 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7965 HWRM_VERSION_UPDATE;
7966
7967 if (bp->hwrm_spec_code > hwrm_ver)
7968 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7969 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7970 HWRM_VERSION_UPDATE);
7971 else
7972 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7973 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7974 resp->hwrm_intf_upd_8b);
7975
d0ad2ea2
MC
7976 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7977 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7978 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7979 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7980 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7981 len = FW_VER_STR_LEN;
7982 } else {
7983 fw_maj = resp->hwrm_fw_maj_8b;
7984 fw_min = resp->hwrm_fw_min_8b;
7985 fw_bld = resp->hwrm_fw_bld_8b;
7986 fw_rsv = resp->hwrm_fw_rsvd_8b;
7987 len = BC_HWRM_STR_LEN;
7988 }
7989 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7990 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7991 fw_rsv);
c0c050c5 7992
691aa620
VV
7993 if (strlen(resp->active_pkg_name)) {
7994 int fw_ver_len = strlen(bp->fw_ver_str);
7995
7996 snprintf(bp->fw_ver_str + fw_ver_len,
7997 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7998 resp->active_pkg_name);
7999 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8000 }
8001
ff4fe81d
MC
8002 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8003 if (!bp->hwrm_cmd_timeout)
8004 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8005
1dfddc41 8006 if (resp->hwrm_intf_maj_8b >= 1) {
e6ef2699 8007 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
1dfddc41
MC
8008 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8009 }
8010 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8011 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
e6ef2699 8012
659c805c 8013 bp->chip_num = le16_to_cpu(resp->chip_num);
5313845f 8014 bp->chip_rev = resp->chip_rev;
3e8060fa
PS
8015 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8016 !resp->chip_metal)
8017 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
659c805c 8018
e605db80
DK
8019 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8020 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8021 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
97381a18 8022 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
e605db80 8023
760b6d33
VD
8024 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8025 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8026
abd43a13
VD
8027 if (dev_caps_cfg &
8028 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8029 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8030
2a516444
MC
8031 if (dev_caps_cfg &
8032 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8033 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8034
e969ae5b
MC
8035 if (dev_caps_cfg &
8036 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8037 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8038
c0c050c5
MC
8039hwrm_ver_get_exit:
8040 mutex_unlock(&bp->hwrm_cmd_lock);
8041 return rc;
8042}
8043
5ac67d8b
RS
8044int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8045{
8046 struct hwrm_fw_set_time_input req = {0};
7dfaa7bc
AB
8047 struct tm tm;
8048 time64_t now = ktime_get_real_seconds();
5ac67d8b 8049
ca2c39e2
MC
8050 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8051 bp->hwrm_spec_code < 0x10400)
5ac67d8b
RS
8052 return -EOPNOTSUPP;
8053
7dfaa7bc 8054 time64_to_tm(now, 0, &tm);
5ac67d8b
RS
8055 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
8056 req.year = cpu_to_le16(1900 + tm.tm_year);
8057 req.month = 1 + tm.tm_mon;
8058 req.day = tm.tm_mday;
8059 req.hour = tm.tm_hour;
8060 req.minute = tm.tm_min;
8061 req.second = tm.tm_sec;
8062 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8063}
8064
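/* Accumulate a hardware counter into its 64-bit software mirror.
 * Hardware counters are narrower than 64 bits (the width is given by
 * the mask), so the software copy keeps the high-order bits and a
 * wrap-around is detected when the new hardware value is smaller than
 * the previously seen low-order bits.
 */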
fea6b333
MC
8065static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8066{
8067 u64 sw_tmp;
8068
fa97f303 8069 hw &= mask;
fea6b333
MC
8070 sw_tmp = (*sw & ~mask) | hw;
8071 if (hw < (*sw & mask))
8072 sw_tmp += mask + 1;
8073 WRITE_ONCE(*sw, sw_tmp);
8074}
8075
8076static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8077 int count, bool ignore_zero)
8078{
8079 int i;
8080
8081 for (i = 0; i < count; i++) {
8082 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8083
8084 if (ignore_zero && !hw)
8085 continue;
8086
8087 if (masks[i] == -1ULL)
8088 sw_stats[i] = hw;
8089 else
8090 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8091 }
8092}
8093
8094static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8095{
8096 if (!stats->hw_stats)
8097 return;
8098
8099 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8100 stats->hw_masks, stats->len / 8, false);
8101}
8102
8103static void bnxt_accumulate_all_stats(struct bnxt *bp)
8104{
8105 struct bnxt_stats_mem *ring0_stats;
8106 bool ignore_zero = false;
8107 int i;
8108
8109 /* Chip bug. Counter intermittently becomes 0. */
8110 if (bp->flags & BNXT_FLAG_CHIP_P5)
8111 ignore_zero = true;
8112
8113 for (i = 0; i < bp->cp_nr_rings; i++) {
8114 struct bnxt_napi *bnapi = bp->bnapi[i];
8115 struct bnxt_cp_ring_info *cpr;
8116 struct bnxt_stats_mem *stats;
8117
8118 cpr = &bnapi->cp_ring;
8119 stats = &cpr->stats;
8120 if (!i)
8121 ring0_stats = stats;
8122 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8123 ring0_stats->hw_masks,
8124 ring0_stats->len / 8, ignore_zero);
8125 }
8126 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8127 struct bnxt_stats_mem *stats = &bp->port_stats;
8128 __le64 *hw_stats = stats->hw_stats;
8129 u64 *sw_stats = stats->sw_stats;
8130 u64 *masks = stats->hw_masks;
8131 int cnt;
8132
8133 cnt = sizeof(struct rx_port_stats) / 8;
8134 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8135
8136 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8137 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8138 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8139 cnt = sizeof(struct tx_port_stats) / 8;
8140 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8141 }
8142 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8143 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8144 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8145 }
8146}
8147
531d1d26 8148static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
3bdf56c4 8149{
3bdf56c4
MC
8150 struct bnxt_pf_info *pf = &bp->pf;
8151 struct hwrm_port_qstats_input req = {0};
8152
8153 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8154 return 0;
8155
531d1d26
MC
8156 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8157 return -EOPNOTSUPP;
8158
8159 req.flags = flags;
3bdf56c4
MC
8160 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
8161 req.port_id = cpu_to_le16(pf->port_id);
177a6cde
MC
8162 req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8163 BNXT_TX_PORT_STATS_BYTE_OFFSET);
8164 req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
9f90445c 8165 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3bdf56c4
MC
8166}
8167
531d1d26 8168static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
00db3cba 8169{
36e53349 8170 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
e37fed79 8171 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
00db3cba
VV
8172 struct hwrm_port_qstats_ext_input req = {0};
8173 struct bnxt_pf_info *pf = &bp->pf;
ad361adf 8174 u32 tx_stat_size;
36e53349 8175 int rc;
00db3cba
VV
8176
8177 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8178 return 0;
8179
531d1d26
MC
8180 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8181 return -EOPNOTSUPP;
8182
00db3cba 8183 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
531d1d26 8184 req.flags = flags;
00db3cba
VV
8185 req.port_id = cpu_to_le16(pf->port_id);
8186 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
177a6cde
MC
8187 req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8188 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8189 sizeof(struct tx_port_stats_ext) : 0;
ad361adf 8190 req.tx_stat_size = cpu_to_le16(tx_stat_size);
177a6cde 8191 req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
36e53349
MC
8192 mutex_lock(&bp->hwrm_cmd_lock);
8193 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8194 if (!rc) {
8195 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
ad361adf
MC
8196 bp->fw_tx_stats_ext_size = tx_stat_size ?
8197 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
36e53349
MC
8198 } else {
8199 bp->fw_rx_stats_ext_size = 0;
8200 bp->fw_tx_stats_ext_size = 0;
8201 }
531d1d26
MC
8202 if (flags)
8203 goto qstats_done;
8204
e37fed79
MC
8205 if (bp->fw_tx_stats_ext_size <=
8206 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8207 mutex_unlock(&bp->hwrm_cmd_lock);
8208 bp->pri2cos_valid = 0;
8209 return rc;
8210 }
8211
8212 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
8213 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8214
8215 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
8216 if (!rc) {
8217 struct hwrm_queue_pri2cos_qcfg_output *resp2;
8218 u8 *pri2cos;
8219 int i, j;
8220
8221 resp2 = bp->hwrm_cmd_resp_addr;
8222 pri2cos = &resp2->pri0_cos_queue_id;
8223 for (i = 0; i < 8; i++) {
8224 u8 queue_id = pri2cos[i];
a24ec322 8225 u8 queue_idx;
e37fed79 8226
a24ec322
MC
8227 /* Per port queue IDs start from 0, 10, 20, etc */
8228 queue_idx = queue_id % 10;
8229 if (queue_idx > BNXT_MAX_QUEUE) {
8230 bp->pri2cos_valid = false;
8231 goto qstats_done;
8232 }
e37fed79
MC
8233 for (j = 0; j < bp->max_q; j++) {
8234 if (bp->q_ids[j] == queue_id)
a24ec322 8235 bp->pri2cos_idx[i] = queue_idx;
e37fed79
MC
8236 }
8237 }
8238 bp->pri2cos_valid = 1;
8239 }
a24ec322 8240qstats_done:
36e53349
MC
8241 mutex_unlock(&bp->hwrm_cmd_lock);
8242 return rc;
00db3cba
VV
8243}
8244
c0c050c5
MC
8245static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8246{
442a35a5 8247 if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
c0c050c5
MC
8248 bnxt_hwrm_tunnel_dst_port_free(
8249 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
442a35a5 8250 if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
c0c050c5
MC
8251 bnxt_hwrm_tunnel_dst_port_free(
8252 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
c0c050c5
MC
8253}
8254
8255static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8256{
8257 int rc, i;
8258 u32 tpa_flags = 0;
8259
8260 if (set_tpa)
8261 tpa_flags = bp->flags & BNXT_FLAG_TPA;
b340dc68 8262 else if (BNXT_NO_FW_ACCESS(bp))
b4fff207 8263 return 0;
c0c050c5
MC
8264 for (i = 0; i < bp->nr_vnics; i++) {
8265 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8266 if (rc) {
8267 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
23e12c89 8268 i, rc);
c0c050c5
MC
8269 return rc;
8270 }
8271 }
8272 return 0;
8273}
8274
8275static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8276{
8277 int i;
8278
8279 for (i = 0; i < bp->nr_vnics; i++)
8280 bnxt_hwrm_vnic_set_rss(bp, i, false);
8281}
8282
a46ecb11 8283static void bnxt_clear_vnic(struct bnxt *bp)
c0c050c5 8284{
a46ecb11
MC
8285 if (!bp->vnic_info)
8286 return;
8287
8288 bnxt_hwrm_clear_vnic_filter(bp);
8289 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
c0c050c5
MC
8290 /* clear all RSS settings before freeing the vnic ctx */
8291 bnxt_hwrm_clear_vnic_rss(bp);
8292 bnxt_hwrm_vnic_ctx_free(bp);
c0c050c5 8293 }
a46ecb11
MC
8294 /* undo the vnic TPA settings before freeing the vnic */
8295 if (bp->flags & BNXT_FLAG_TPA)
8296 bnxt_set_tpa(bp, false);
8297 bnxt_hwrm_vnic_free(bp);
8298 if (bp->flags & BNXT_FLAG_CHIP_P5)
8299 bnxt_hwrm_vnic_ctx_free(bp);
8300}
8301
8302static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8303 bool irq_re_init)
8304{
8305 bnxt_clear_vnic(bp);
c0c050c5
MC
8306 bnxt_hwrm_ring_free(bp, close_path);
8307 bnxt_hwrm_ring_grp_free(bp);
8308 if (irq_re_init) {
8309 bnxt_hwrm_stat_ctx_free(bp);
8310 bnxt_hwrm_free_tunnel_ports(bp);
8311 }
8312}
8313
39d8ba2e
MC
8314static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8315{
8316 struct hwrm_func_cfg_input req = {0};
39d8ba2e
MC
8317
8318 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8319 req.fid = cpu_to_le16(0xffff);
8320 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8321 if (br_mode == BRIDGE_MODE_VEB)
8322 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8323 else if (br_mode == BRIDGE_MODE_VEPA)
8324 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8325 else
8326 return -EINVAL;
9f90445c 8327 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
39d8ba2e
MC
8328}
8329
c3480a60
MC
8330static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8331{
8332 struct hwrm_func_cfg_input req = {0};
c3480a60
MC
8333
8334 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8335 return 0;
8336
8337 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8338 req.fid = cpu_to_le16(0xffff);
8339 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
d4f52de0 8340 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
c3480a60 8341 if (size == 128)
d4f52de0 8342 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
c3480a60 8343
9f90445c 8344 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
c3480a60
MC
8345}
8346
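/* Set up one VNIC on pre-P5 chips: allocate its RSS context(s),
 * configure the default ring group, enable RSS hashing and, when
 * aggregation rings are in use, header-data split (HDS).
 */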
7b3af4f7 8347static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
c0c050c5 8348{
ae10ae74 8349 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
c0c050c5
MC
8350 int rc;
8351
ae10ae74
MC
8352 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8353 goto skip_rss_ctx;
8354
c0c050c5 8355 /* allocate context for vnic */
94ce9caa 8356 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
c0c050c5
MC
8357 if (rc) {
8358 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8359 vnic_id, rc);
8360 goto vnic_setup_err;
8361 }
8362 bp->rsscos_nr_ctxs++;
8363
94ce9caa
PS
8364 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8365 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8366 if (rc) {
8367 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8368 vnic_id, rc);
8369 goto vnic_setup_err;
8370 }
8371 bp->rsscos_nr_ctxs++;
8372 }
8373
ae10ae74 8374skip_rss_ctx:
c0c050c5
MC
8375 /* configure default vnic, ring grp */
8376 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8377 if (rc) {
8378 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8379 vnic_id, rc);
8380 goto vnic_setup_err;
8381 }
8382
8383 /* Enable RSS hashing on vnic */
8384 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8385 if (rc) {
8386 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8387 vnic_id, rc);
8388 goto vnic_setup_err;
8389 }
8390
8391 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8392 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8393 if (rc) {
8394 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8395 vnic_id, rc);
8396 }
8397 }
8398
8399vnic_setup_err:
8400 return rc;
8401}
8402
7b3af4f7
MC
8403static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8404{
8405 int rc, i, nr_ctxs;
8406
f9f6a3fb 8407 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
7b3af4f7
MC
8408 for (i = 0; i < nr_ctxs; i++) {
8409 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8410 if (rc) {
8411 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8412 vnic_id, i, rc);
8413 break;
8414 }
8415 bp->rsscos_nr_ctxs++;
8416 }
8417 if (i < nr_ctxs)
8418 return -ENOMEM;
8419
8420 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8421 if (rc) {
8422 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8423 vnic_id, rc);
8424 return rc;
8425 }
8426 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8427 if (rc) {
8428 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8429 vnic_id, rc);
8430 return rc;
8431 }
8432 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8433 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8434 if (rc) {
8435 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8436 vnic_id, rc);
8437 }
8438 }
8439 return rc;
8440}
8441
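/* Set up one VNIC, dispatching on the chip generation: P5 chips use
 * __bnxt_setup_vnic_p5(), which allocates RSS contexts based on the RX
 * ring count, while older chips use __bnxt_setup_vnic().
 */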
8442static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8443{
8444 if (bp->flags & BNXT_FLAG_CHIP_P5)
8445 return __bnxt_setup_vnic_p5(bp, vnic_id);
8446 else
8447 return __bnxt_setup_vnic(bp, vnic_id);
8448}
8449
c0c050c5
MC
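/* For accelerated RFS, allocate and set up one additional VNIC per RX ring
 * (VNIC 0 stays the default VNIC) so that flows can be steered to
 * individual rings.  Skipped on P5 chips.
 */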
8450static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8451{
8452#ifdef CONFIG_RFS_ACCEL
8453 int i, rc = 0;
8454
9b3d15e6
MC
8455 if (bp->flags & BNXT_FLAG_CHIP_P5)
8456 return 0;
8457
c0c050c5 8458 for (i = 0; i < bp->rx_nr_rings; i++) {
ae10ae74 8459 struct bnxt_vnic_info *vnic;
c0c050c5
MC
8460 u16 vnic_id = i + 1;
8461 u16 ring_id = i;
8462
8463 if (vnic_id >= bp->nr_vnics)
8464 break;
8465
ae10ae74
MC
8466 vnic = &bp->vnic_info[vnic_id];
8467 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8468 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8469 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
b81a90d3 8470 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
c0c050c5
MC
8471 if (rc) {
8472 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8473 vnic_id, rc);
8474 break;
8475 }
8476 rc = bnxt_setup_vnic(bp, vnic_id);
8477 if (rc)
8478 break;
8479 }
8480 return rc;
8481#else
8482 return 0;
8483#endif
8484}
8485
dd85fc0a 8486/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
17c71ac3
MC
8487static bool bnxt_promisc_ok(struct bnxt *bp)
8488{
8489#ifdef CONFIG_BNXT_SRIOV
dd85fc0a 8490 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
17c71ac3
MC
8491 return false;
8492#endif
8493 return true;
8494}
8495
dc52c6c7
PS
8496static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8497{
8498 unsigned int rc = 0;
8499
8500 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8501 if (rc) {
8502 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8503 rc);
8504 return rc;
8505 }
8506
8507 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8508 if (rc) {
8509 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8510 rc);
8511 return rc;
8512 }
8513 return rc;
8514}
8515
b664f008 8516static int bnxt_cfg_rx_mode(struct bnxt *);
7d2837dd 8517static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
b664f008 8518
c0c050c5
MC
8519static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8520{
7d2837dd 8521 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
c0c050c5 8522 int rc = 0;
76595193 8523 unsigned int rx_nr_rings = bp->rx_nr_rings;
c0c050c5
MC
8524
8525 if (irq_re_init) {
8526 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8527 if (rc) {
8528 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8529 rc);
8530 goto err_out;
8531 }
8532 }
8533
8534 rc = bnxt_hwrm_ring_alloc(bp);
8535 if (rc) {
8536 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8537 goto err_out;
8538 }
8539
8540 rc = bnxt_hwrm_ring_grp_alloc(bp);
8541 if (rc) {
8542 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8543 goto err_out;
8544 }
8545
76595193
PS
8546 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8547 rx_nr_rings--;
8548
c0c050c5 8549 /* default vnic 0 */
76595193 8550 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
c0c050c5
MC
8551 if (rc) {
8552 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8553 goto err_out;
8554 }
8555
8556 rc = bnxt_setup_vnic(bp, 0);
8557 if (rc)
8558 goto err_out;
8559
8560 if (bp->flags & BNXT_FLAG_RFS) {
8561 rc = bnxt_alloc_rfs_vnics(bp);
8562 if (rc)
8563 goto err_out;
8564 }
8565
8566 if (bp->flags & BNXT_FLAG_TPA) {
8567 rc = bnxt_set_tpa(bp, true);
8568 if (rc)
8569 goto err_out;
8570 }
8571
8572 if (BNXT_VF(bp))
8573 bnxt_update_vf_mac(bp);
8574
8575 /* Filter for default vnic 0 */
8576 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8577 if (rc) {
8578 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8579 goto err_out;
8580 }
7d2837dd 8581 vnic->uc_filter_count = 1;
c0c050c5 8582
30e33848
MC
8583 vnic->rx_mask = 0;
8584 if (bp->dev->flags & IFF_BROADCAST)
8585 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5 8586
dd85fc0a 8587 if (bp->dev->flags & IFF_PROMISC)
7d2837dd
MC
8588 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8589
8590 if (bp->dev->flags & IFF_ALLMULTI) {
8591 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8592 vnic->mc_list_count = 0;
8593 } else {
8594 u32 mask = 0;
8595
8596 bnxt_mc_list_updated(bp, &mask);
8597 vnic->rx_mask |= mask;
8598 }
c0c050c5 8599
b664f008
MC
8600 rc = bnxt_cfg_rx_mode(bp);
8601 if (rc)
c0c050c5 8602 goto err_out;
c0c050c5
MC
8603
8604 rc = bnxt_hwrm_set_coal(bp);
8605 if (rc)
8606 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
dc52c6c7
PS
8607 rc);
8608
8609 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8610 rc = bnxt_setup_nitroa0_vnic(bp);
8611 if (rc)
8612 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8613 rc);
8614 }
c0c050c5 8615
cf6645f8
MC
8616 if (BNXT_VF(bp)) {
8617 bnxt_hwrm_func_qcfg(bp);
8618 netdev_update_features(bp->dev);
8619 }
8620
c0c050c5
MC
8621 return 0;
8622
8623err_out:
8624 bnxt_hwrm_resource_free(bp, 0, true);
8625
8626 return rc;
8627}
8628
8629static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8630{
8631 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8632 return 0;
8633}
8634
8635static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8636{
2247925f 8637 bnxt_init_cp_rings(bp);
c0c050c5
MC
8638 bnxt_init_rx_rings(bp);
8639 bnxt_init_tx_rings(bp);
8640 bnxt_init_ring_grps(bp, irq_re_init);
8641 bnxt_init_vnics(bp);
8642
8643 return bnxt_init_chip(bp, irq_re_init);
8644}
8645
c0c050c5
MC
8646static int bnxt_set_real_num_queues(struct bnxt *bp)
8647{
8648 int rc;
8649 struct net_device *dev = bp->dev;
8650
5f449249
MC
8651 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8652 bp->tx_nr_rings_xdp);
c0c050c5
MC
8653 if (rc)
8654 return rc;
8655
8656 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8657 if (rc)
8658 return rc;
8659
8660#ifdef CONFIG_RFS_ACCEL
45019a18 8661 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 8662 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
8663#endif
8664
8665 return rc;
8666}
8667
6e6c5a57
MC
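/* Trim the requested RX/TX ring counts so they fit within 'max'.  With
 * shared completion rings each count is simply capped at 'max'; otherwise
 * whichever count is larger is decremented (TX on ties) until
 * rx + tx <= max.
 * Example: rx = 8, tx = 8, max = 12 (not shared) trims to rx = 6, tx = 6.
 */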
8668static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8669 bool shared)
8670{
8671 int _rx = *rx, _tx = *tx;
8672
8673 if (shared) {
8674 *rx = min_t(int, _rx, max);
8675 *tx = min_t(int, _tx, max);
8676 } else {
8677 if (max < 2)
8678 return -ENOMEM;
8679
8680 while (_rx + _tx > max) {
8681 if (_rx > _tx && _rx > 1)
8682 _rx--;
8683 else if (_tx > 1)
8684 _tx--;
8685 }
8686 *rx = _rx;
8687 *tx = _tx;
8688 }
8689 return 0;
8690}
8691
7809592d
MC
8692static void bnxt_setup_msix(struct bnxt *bp)
8693{
8694 const int len = sizeof(bp->irq_tbl[0].name);
8695 struct net_device *dev = bp->dev;
8696 int tcs, i;
8697
8698 tcs = netdev_get_num_tc(dev);
18e4960c 8699 if (tcs) {
d1e7925e 8700 int i, off, count;
7809592d 8701
d1e7925e
MC
8702 for (i = 0; i < tcs; i++) {
8703 count = bp->tx_nr_rings_per_tc;
8704 off = i * count;
8705 netdev_set_tc_queue(dev, i, count, off);
7809592d
MC
8706 }
8707 }
8708
8709 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c 8710 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7809592d
MC
8711 char *attr;
8712
8713 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8714 attr = "TxRx";
8715 else if (i < bp->rx_nr_rings)
8716 attr = "rx";
8717 else
8718 attr = "tx";
8719
e5811b8c
MC
8720 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8721 attr, i);
8722 bp->irq_tbl[map_idx].handler = bnxt_msix;
7809592d
MC
8723 }
8724}
8725
8726static void bnxt_setup_inta(struct bnxt *bp)
8727{
8728 const int len = sizeof(bp->irq_tbl[0].name);
8729
8730 if (netdev_get_num_tc(bp->dev))
8731 netdev_reset_tc(bp->dev);
8732
8733 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8734 0);
8735 bp->irq_tbl[0].handler = bnxt_inta;
8736}
8737
20d7d1c5
EP
8738static int bnxt_init_int_mode(struct bnxt *bp);
8739
7809592d
MC
8740static int bnxt_setup_int_mode(struct bnxt *bp)
8741{
8742 int rc;
8743
20d7d1c5
EP
8744 if (!bp->irq_tbl) {
8745 rc = bnxt_init_int_mode(bp);
8746 if (rc || !bp->irq_tbl)
8747 return rc ?: -ENODEV;
8748 }
8749
7809592d
MC
8750 if (bp->flags & BNXT_FLAG_USING_MSIX)
8751 bnxt_setup_msix(bp);
8752 else
8753 bnxt_setup_inta(bp);
8754
8755 rc = bnxt_set_real_num_queues(bp);
8756 return rc;
8757}
8758
b7429954 8759#ifdef CONFIG_RFS_ACCEL
8079e8f1
MC
8760static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8761{
6a4f2947 8762 return bp->hw_resc.max_rsscos_ctxs;
8079e8f1
MC
8763}
8764
8765static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8766{
6a4f2947 8767 return bp->hw_resc.max_vnics;
8079e8f1 8768}
b7429954 8769#endif
8079e8f1 8770
e4060d30
MC
8771unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8772{
6a4f2947 8773 return bp->hw_resc.max_stat_ctxs;
e4060d30
MC
8774}
8775
8776unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8777{
6a4f2947 8778 return bp->hw_resc.max_cp_rings;
e4060d30
MC
8779}
8780
e916b081 8781static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
a588e458 8782{
c0b8cda0
MC
8783 unsigned int cp = bp->hw_resc.max_cp_rings;
8784
8785 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8786 cp -= bnxt_get_ulp_msix_num(bp);
8787
8788 return cp;
a588e458
MC
8789}
8790
ad95c27b 8791static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7809592d 8792{
6a4f2947
MC
8793 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8794
f7588cd8
MC
8795 if (bp->flags & BNXT_FLAG_CHIP_P5)
8796 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8797
6a4f2947 8798 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7809592d
MC
8799}
8800
30f52947 8801static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
33c2657e 8802{
6a4f2947 8803 bp->hw_resc.max_irqs = max_irqs;
33c2657e
MC
8804}
8805
e916b081
MC
8806unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8807{
8808 unsigned int cp;
8809
8810 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8811 if (bp->flags & BNXT_FLAG_CHIP_P5)
8812 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8813 else
8814 return cp - bp->cp_nr_rings;
8815}
8816
c027c6b4
VV
8817unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8818{
d77b1ad8 8819 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
c027c6b4
VV
8820}
8821
fbcfc8e4
MC
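/* Return the number of additional MSI-X vectors that can still be used on
 * top of the completion rings already in use, taking the function's IRQ
 * and completion ring limits into account.
 */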
8822int bnxt_get_avail_msix(struct bnxt *bp, int num)
8823{
8824 int max_cp = bnxt_get_max_func_cp_rings(bp);
8825 int max_irq = bnxt_get_max_func_irqs(bp);
8826 int total_req = bp->cp_nr_rings + num;
8827 int max_idx, avail_msix;
8828
75720e63
MC
8829 max_idx = bp->total_irqs;
8830 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8831 max_idx = min_t(int, bp->total_irqs, max_cp);
fbcfc8e4 8832 avail_msix = max_idx - bp->cp_nr_rings;
f1ca94de 8833 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
fbcfc8e4
MC
8834 return avail_msix;
8835
8836 if (max_irq < total_req) {
8837 num = max_irq - bp->cp_nr_rings;
8838 if (num <= 0)
8839 return 0;
8840 }
8841 return num;
8842}
8843
08654eb2
MC
8844static int bnxt_get_num_msix(struct bnxt *bp)
8845{
f1ca94de 8846 if (!BNXT_NEW_RM(bp))
08654eb2
MC
8847 return bnxt_get_max_func_irqs(bp);
8848
c0b8cda0 8849 return bnxt_nq_rings_in_use(bp);
08654eb2
MC
8850}
8851
7809592d 8852static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 8853{
fbcfc8e4 8854 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7809592d 8855 struct msix_entry *msix_ent;
c0c050c5 8856
08654eb2
MC
8857 total_vecs = bnxt_get_num_msix(bp);
8858 max = bnxt_get_max_func_irqs(bp);
8859 if (total_vecs > max)
8860 total_vecs = max;
8861
2773dfb2
MC
8862 if (!total_vecs)
8863 return 0;
8864
c0c050c5
MC
8865 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8866 if (!msix_ent)
8867 return -ENOMEM;
8868
8869 for (i = 0; i < total_vecs; i++) {
8870 msix_ent[i].entry = i;
8871 msix_ent[i].vector = 0;
8872 }
8873
01657bcd
MC
8874 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8875 min = 2;
8876
8877 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
fbcfc8e4
MC
8878 ulp_msix = bnxt_get_ulp_msix_num(bp);
8879 if (total_vecs < 0 || total_vecs < ulp_msix) {
c0c050c5
MC
8880 rc = -ENODEV;
8881 goto msix_setup_exit;
8882 }
8883
8884 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8885 if (bp->irq_tbl) {
7809592d
MC
8886 for (i = 0; i < total_vecs; i++)
8887 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 8888
7809592d 8889 bp->total_irqs = total_vecs;
c0c050c5 8890 /* Trim the rings based on the number of vectors allocated */
6e6c5a57 8891 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
fbcfc8e4 8892 total_vecs - ulp_msix, min == 1);
6e6c5a57
MC
8893 if (rc)
8894 goto msix_setup_exit;
8895
7809592d
MC
8896 bp->cp_nr_rings = (min == 1) ?
8897 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8898 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 8899
c0c050c5
MC
8900 } else {
8901 rc = -ENOMEM;
8902 goto msix_setup_exit;
8903 }
8904 bp->flags |= BNXT_FLAG_USING_MSIX;
8905 kfree(msix_ent);
8906 return 0;
8907
8908msix_setup_exit:
7809592d
MC
8909 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8910 kfree(bp->irq_tbl);
8911 bp->irq_tbl = NULL;
c0c050c5
MC
8912 pci_disable_msix(bp->pdev);
8913 kfree(msix_ent);
8914 return rc;
8915}
8916
7809592d 8917static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 8918{
33dbcf60 8919 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
8920 if (!bp->irq_tbl)
8921 return -ENOMEM;
8922
8923 bp->total_irqs = 1;
c0c050c5
MC
8924 bp->rx_nr_rings = 1;
8925 bp->tx_nr_rings = 1;
8926 bp->cp_nr_rings = 1;
01657bcd 8927 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 8928 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 8929 return 0;
c0c050c5
MC
8930}
8931
7809592d 8932static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5 8933{
20d7d1c5 8934 int rc = -ENODEV;
c0c050c5
MC
8935
8936 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 8937 rc = bnxt_init_msix(bp);
c0c050c5 8938
1fa72e29 8939 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 8940 /* fallback to INTA */
7809592d 8941 rc = bnxt_init_inta(bp);
c0c050c5
MC
8942 }
8943 return rc;
8944}
8945
7809592d
MC
8946static void bnxt_clear_int_mode(struct bnxt *bp)
8947{
8948 if (bp->flags & BNXT_FLAG_USING_MSIX)
8949 pci_disable_msix(bp->pdev);
8950
8951 kfree(bp->irq_tbl);
8952 bp->irq_tbl = NULL;
8953 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8954}
8955
1b3f0b75 8956int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
674f50a5 8957{
674f50a5 8958 int tcs = netdev_get_num_tc(bp->dev);
1b3f0b75 8959 bool irq_cleared = false;
674f50a5
MC
8960 int rc;
8961
8962 if (!bnxt_need_reserve_rings(bp))
8963 return 0;
8964
1b3f0b75
MC
8965 if (irq_re_init && BNXT_NEW_RM(bp) &&
8966 bnxt_get_num_msix(bp) != bp->total_irqs) {
ec86f14e 8967 bnxt_ulp_irq_stop(bp);
674f50a5 8968 bnxt_clear_int_mode(bp);
1b3f0b75 8969 irq_cleared = true;
36d65be9
MC
8970 }
8971 rc = __bnxt_reserve_rings(bp);
1b3f0b75 8972 if (irq_cleared) {
36d65be9
MC
8973 if (!rc)
8974 rc = bnxt_init_int_mode(bp);
ec86f14e 8975 bnxt_ulp_irq_restart(bp, rc);
36d65be9
MC
8976 }
8977 if (rc) {
8978 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8979 return rc;
674f50a5
MC
8980 }
8981 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8982 netdev_err(bp->dev, "tx ring reservation failure\n");
8983 netdev_reset_tc(bp->dev);
8984 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8985 return -ENOMEM;
8986 }
674f50a5
MC
8987 return 0;
8988}
8989
c0c050c5
MC
8990static void bnxt_free_irq(struct bnxt *bp)
8991{
8992 struct bnxt_irq *irq;
8993 int i;
8994
8995#ifdef CONFIG_RFS_ACCEL
8996 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8997 bp->dev->rx_cpu_rmap = NULL;
8998#endif
cb98526b 8999 if (!bp->irq_tbl || !bp->bnapi)
c0c050c5
MC
9000 return;
9001
9002 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
9003 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9004
9005 irq = &bp->irq_tbl[map_idx];
56f0fd80
VV
9006 if (irq->requested) {
9007 if (irq->have_cpumask) {
9008 irq_set_affinity_hint(irq->vector, NULL);
9009 free_cpumask_var(irq->cpu_mask);
9010 irq->have_cpumask = 0;
9011 }
c0c050c5 9012 free_irq(irq->vector, bp->bnapi[i]);
56f0fd80
VV
9013 }
9014
c0c050c5
MC
9015 irq->requested = 0;
9016 }
c0c050c5
MC
9017}
9018
9019static int bnxt_request_irq(struct bnxt *bp)
9020{
b81a90d3 9021 int i, j, rc = 0;
c0c050c5
MC
9022 unsigned long flags = 0;
9023#ifdef CONFIG_RFS_ACCEL
e5811b8c 9024 struct cpu_rmap *rmap;
c0c050c5
MC
9025#endif
9026
e5811b8c
MC
9027 rc = bnxt_setup_int_mode(bp);
9028 if (rc) {
9029 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9030 rc);
9031 return rc;
9032 }
9033#ifdef CONFIG_RFS_ACCEL
9034 rmap = bp->dev->rx_cpu_rmap;
9035#endif
c0c050c5
MC
9036 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9037 flags = IRQF_SHARED;
9038
b81a90d3 9039 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
9040 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9041 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9042
c0c050c5 9043#ifdef CONFIG_RFS_ACCEL
b81a90d3 9044 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
9045 rc = irq_cpu_rmap_add(rmap, irq->vector);
9046 if (rc)
9047 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
b81a90d3
MC
9048 j);
9049 j++;
c0c050c5
MC
9050 }
9051#endif
9052 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9053 bp->bnapi[i]);
9054 if (rc)
9055 break;
9056
9057 irq->requested = 1;
56f0fd80
VV
9058
9059 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9060 int numa_node = dev_to_node(&bp->pdev->dev);
9061
9062 irq->have_cpumask = 1;
9063 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9064 irq->cpu_mask);
9065 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9066 if (rc) {
9067 netdev_warn(bp->dev,
9068 "Set affinity failed, IRQ = %d\n",
9069 irq->vector);
9070 break;
9071 }
9072 }
c0c050c5
MC
9073 }
9074 return rc;
9075}
9076
9077static void bnxt_del_napi(struct bnxt *bp)
9078{
9079 int i;
9080
9081 if (!bp->bnapi)
9082 return;
9083
9084 for (i = 0; i < bp->cp_nr_rings; i++) {
9085 struct bnxt_napi *bnapi = bp->bnapi[i];
9086
5198d545 9087 __netif_napi_del(&bnapi->napi);
c0c050c5 9088 }
5198d545 9089 /* We called __netif_napi_del(), so we need
e5f6f564
ED
9090 * to respect an RCU grace period before freeing napi structures.
9091 */
9092 synchronize_net();
c0c050c5
MC
9093}
9094
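/* Register one NAPI context per completion ring.  The poll handler depends
 * on the chip: bnxt_poll_p5() on P5 chips, bnxt_poll() otherwise, plus a
 * separate bnxt_poll_nitroa0() instance for the extra Nitro A0 ring.  With
 * INTA only a single NAPI context is registered.
 */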
9095static void bnxt_init_napi(struct bnxt *bp)
9096{
9097 int i;
10bbdaf5 9098 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
9099 struct bnxt_napi *bnapi;
9100
9101 if (bp->flags & BNXT_FLAG_USING_MSIX) {
0fcec985
MC
9102 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9103
9104 if (bp->flags & BNXT_FLAG_CHIP_P5)
9105 poll_fn = bnxt_poll_p5;
9106 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10bbdaf5
PS
9107 cp_nr_rings--;
9108 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5 9109 bnapi = bp->bnapi[i];
0fcec985 9110 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
c0c050c5 9111 }
10bbdaf5
PS
9112 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9113 bnapi = bp->bnapi[cp_nr_rings];
9114 netif_napi_add(bp->dev, &bnapi->napi,
9115 bnxt_poll_nitroa0, 64);
10bbdaf5 9116 }
c0c050c5
MC
9117 } else {
9118 bnapi = bp->bnapi[0];
9119 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
c0c050c5
MC
9120 }
9121}
9122
9123static void bnxt_disable_napi(struct bnxt *bp)
9124{
9125 int i;
9126
e340a5c4
MC
9127 if (!bp->bnapi ||
9128 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
c0c050c5
MC
9129 return;
9130
0bc0b97f
AG
9131 for (i = 0; i < bp->cp_nr_rings; i++) {
9132 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9133
9134 if (bp->bnapi[i]->rx_ring)
9135 cancel_work_sync(&cpr->dim.work);
9136
c0c050c5 9137 napi_disable(&bp->bnapi[i]->napi);
0bc0b97f 9138 }
c0c050c5
MC
9139}
9140
9141static void bnxt_enable_napi(struct bnxt *bp)
9142{
9143 int i;
9144
e340a5c4 9145 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
c0c050c5 9146 for (i = 0; i < bp->cp_nr_rings; i++) {
8a27d4b9
MC
9147 struct bnxt_napi *bnapi = bp->bnapi[i];
9148 struct bnxt_cp_ring_info *cpr;
9149
9150 cpr = &bnapi->cp_ring;
9151 if (bnapi->in_reset)
9152 cpr->sw_stats.rx.rx_resets++;
9153 bnapi->in_reset = false;
6a8788f2 9154
8a27d4b9 9155 if (bnapi->rx_ring) {
6a8788f2 9156 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
c002bd52 9157 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6a8788f2 9158 }
8a27d4b9 9159 napi_enable(&bnapi->napi);
c0c050c5
MC
9160 }
9161}
9162
7df4ae9f 9163void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
9164{
9165 int i;
c0c050c5 9166 struct bnxt_tx_ring_info *txr;
c0c050c5 9167
b6ab4b01 9168 if (bp->tx_ring) {
c0c050c5 9169 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 9170 txr = &bp->tx_ring[i];
c0c050c5 9171 txr->dev_state = BNXT_DEV_STATE_CLOSING;
c0c050c5
MC
9172 }
9173 }
132e0b65
EP
9174 /* Drop carrier first to prevent TX timeout */
9175 netif_carrier_off(bp->dev);
c0c050c5
MC
9176 /* Stop all TX queues */
9177 netif_tx_disable(bp->dev);
c0c050c5
MC
9178}
9179
7df4ae9f 9180void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
9181{
9182 int i;
c0c050c5 9183 struct bnxt_tx_ring_info *txr;
c0c050c5
MC
9184
9185 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 9186 txr = &bp->tx_ring[i];
c0c050c5
MC
9187 txr->dev_state = 0;
9188 }
9189 netif_tx_wake_all_queues(bp->dev);
9190 if (bp->link_info.link_up)
9191 netif_carrier_on(bp->dev);
9192}
9193
2046e3c3
MC
9194static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9195{
9196 u8 active_fec = link_info->active_fec_sig_mode &
9197 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9198
9199 switch (active_fec) {
9200 default:
9201 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9202 return "None";
9203 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9204 return "Clause 74 BaseR";
9205 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9206 return "Clause 91 RS(528,514)";
9207 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9208 return "Clause 91 RS544_1XN";
9209 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9210 return "Clause 91 RS(544,514)";
9211 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9212 return "Clause 91 RS272_1XN";
9213 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9214 return "Clause 91 RS(272,257)";
9215 }
9216}
9217
c0c050c5
MC
9218static void bnxt_report_link(struct bnxt *bp)
9219{
9220 if (bp->link_info.link_up) {
1d2deb61 9221 const char *signal = "";
c0c050c5 9222 const char *flow_ctrl;
1d2deb61 9223 const char *duplex;
38a21b34
DK
9224 u32 speed;
9225 u16 fec;
c0c050c5
MC
9226
9227 netif_carrier_on(bp->dev);
8eddb3e7
MC
9228 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9229 if (speed == SPEED_UNKNOWN) {
9230 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9231 return;
9232 }
c0c050c5
MC
9233 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9234 duplex = "full";
9235 else
9236 duplex = "half";
9237 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9238 flow_ctrl = "ON - receive & transmit";
9239 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9240 flow_ctrl = "ON - transmit";
9241 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9242 flow_ctrl = "ON - receive";
9243 else
9244 flow_ctrl = "none";
1d2deb61
EP
9245 if (bp->link_info.phy_qcfg_resp.option_flags &
9246 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9247 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9248 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9249 switch (sig_mode) {
9250 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9251 signal = "(NRZ) ";
9252 break;
9253 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9254 signal = "(PAM4) ";
9255 break;
9256 default:
9257 break;
9258 }
9259 }
9260 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9261 speed, signal, duplex, flow_ctrl);
b0d28207 9262 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
170ce013
MC
9263 netdev_info(bp->dev, "EEE is %s\n",
9264 bp->eee.eee_active ? "active" :
9265 "not active");
e70c752f
MC
9266 fec = bp->link_info.fec_cfg;
9267 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
2046e3c3 9268 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
e70c752f 9269 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
2046e3c3 9270 bnxt_report_fec(&bp->link_info));
c0c050c5
MC
9271 } else {
9272 netif_carrier_off(bp->dev);
9273 netdev_err(bp->dev, "NIC Link is Down\n");
9274 }
9275}
9276
3128e811
MC
9277static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9278{
9279 if (!resp->supported_speeds_auto_mode &&
9280 !resp->supported_speeds_force_mode &&
9281 !resp->supported_pam4_speeds_auto_mode &&
9282 !resp->supported_pam4_speeds_force_mode)
9283 return true;
9284 return false;
9285}
9286
170ce013
MC
9287static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9288{
9289 int rc = 0;
9290 struct hwrm_port_phy_qcaps_input req = {0};
9291 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
93ed8117 9292 struct bnxt_link_info *link_info = &bp->link_info;
170ce013
MC
9293
9294 if (bp->hwrm_spec_code < 0x10201)
9295 return 0;
9296
9297 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
9298
9299 mutex_lock(&bp->hwrm_cmd_lock);
9300 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9301 if (rc)
9302 goto hwrm_phy_qcaps_exit;
9303
b0d28207 9304 bp->phy_flags = resp->flags;
acb20054 9305 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
170ce013
MC
9306 struct ethtool_eee *eee = &bp->eee;
9307 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9308
170ce013
MC
9309 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9310 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9311 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9312 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9313 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9314 }
fea6b333 9315
3128e811
MC
9316 if (bp->hwrm_spec_code >= 0x10a01) {
9317 if (bnxt_phy_qcaps_no_speed(resp)) {
9318 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9319 netdev_warn(bp->dev, "Ethernet link disabled\n");
9320 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9321 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9322 netdev_info(bp->dev, "Ethernet link enabled\n");
9323 /* Phy re-enabled, reprobe the speeds */
9324 link_info->support_auto_speeds = 0;
9325 link_info->support_pam4_auto_speeds = 0;
9326 }
9327 }
520ad89a
MC
9328 if (resp->supported_speeds_auto_mode)
9329 link_info->support_auto_speeds =
9330 le16_to_cpu(resp->supported_speeds_auto_mode);
d058426e
EP
9331 if (resp->supported_pam4_speeds_auto_mode)
9332 link_info->support_pam4_auto_speeds =
9333 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
170ce013 9334
d5430d31
MC
9335 bp->port_count = resp->port_cnt;
9336
170ce013
MC
9337hwrm_phy_qcaps_exit:
9338 mutex_unlock(&bp->hwrm_cmd_lock);
9339 return rc;
9340}
9341
c916062a
EP
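/* Return true if 'advertising' contains any speed bit that is no longer
 * present in 'supported', i.e. a previously advertised speed has been
 * dropped.  (supported | (advertising ^ supported)) differs from
 * 'supported' exactly when advertising & ~supported is non-zero.
 */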
9342static bool bnxt_support_dropped(u16 advertising, u16 supported)
9343{
9344 u16 diff = advertising ^ supported;
9345
9346 return ((supported | diff) != supported);
9347}
9348
ccd6a9dc 9349int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
c0c050c5
MC
9350{
9351 int rc = 0;
9352 struct bnxt_link_info *link_info = &bp->link_info;
9353 struct hwrm_port_phy_qcfg_input req = {0};
9354 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9355 u8 link_up = link_info->link_up;
d058426e 9356 bool support_changed = false;
c0c050c5
MC
9357
9358 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
9359
9360 mutex_lock(&bp->hwrm_cmd_lock);
9361 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9362 if (rc) {
9363 mutex_unlock(&bp->hwrm_cmd_lock);
9364 return rc;
9365 }
9366
9367 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9368 link_info->phy_link_status = resp->link;
acb20054
MC
9369 link_info->duplex = resp->duplex_cfg;
9370 if (bp->hwrm_spec_code >= 0x10800)
9371 link_info->duplex = resp->duplex_state;
c0c050c5
MC
9372 link_info->pause = resp->pause;
9373 link_info->auto_mode = resp->auto_mode;
9374 link_info->auto_pause_setting = resp->auto_pause;
3277360e 9375 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 9376 link_info->force_pause_setting = resp->force_pause;
acb20054 9377 link_info->duplex_setting = resp->duplex_cfg;
c0c050c5
MC
9378 if (link_info->phy_link_status == BNXT_LINK_LINK)
9379 link_info->link_speed = le16_to_cpu(resp->link_speed);
9380 else
9381 link_info->link_speed = 0;
9382 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
d058426e
EP
9383 link_info->force_pam4_link_speed =
9384 le16_to_cpu(resp->force_pam4_link_speed);
c0c050c5 9385 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
d058426e 9386 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
c0c050c5 9387 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
d058426e
EP
9388 link_info->auto_pam4_link_speeds =
9389 le16_to_cpu(resp->auto_pam4_link_speed_mask);
3277360e
MC
9390 link_info->lp_auto_link_speeds =
9391 le16_to_cpu(resp->link_partner_adv_speeds);
d058426e
EP
9392 link_info->lp_auto_pam4_link_speeds =
9393 resp->link_partner_pam4_adv_speeds;
c0c050c5
MC
9394 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9395 link_info->phy_ver[0] = resp->phy_maj;
9396 link_info->phy_ver[1] = resp->phy_min;
9397 link_info->phy_ver[2] = resp->phy_bld;
9398 link_info->media_type = resp->media_type;
03efbec0 9399 link_info->phy_type = resp->phy_type;
11f15ed3 9400 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
9401 link_info->phy_addr = resp->eee_config_phy_addr &
9402 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 9403 link_info->module_status = resp->module_status;
170ce013 9404
b0d28207 9405 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
170ce013
MC
9406 struct ethtool_eee *eee = &bp->eee;
9407 u16 fw_speeds;
9408
9409 eee->eee_active = 0;
9410 if (resp->eee_config_phy_addr &
9411 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9412 eee->eee_active = 1;
9413 fw_speeds = le16_to_cpu(
9414 resp->link_partner_adv_eee_link_speed_mask);
9415 eee->lp_advertised =
9416 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9417 }
9418
9419 /* Pull initial EEE config */
9420 if (!chng_link_state) {
9421 if (resp->eee_config_phy_addr &
9422 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9423 eee->eee_enabled = 1;
c0c050c5 9424
170ce013
MC
9425 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9426 eee->advertised =
9427 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9428
9429 if (resp->eee_config_phy_addr &
9430 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9431 __le32 tmr;
9432
9433 eee->tx_lpi_enabled = 1;
9434 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9435 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9436 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9437 }
9438 }
9439 }
e70c752f
MC
9440
9441 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8b277589 9442 if (bp->hwrm_spec_code >= 0x10504) {
e70c752f 9443 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8b277589
MC
9444 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9445 }
c0c050c5
MC
9446 /* TODO: need to add more logic to report VF link */
9447 if (chng_link_state) {
9448 if (link_info->phy_link_status == BNXT_LINK_LINK)
9449 link_info->link_up = 1;
9450 else
9451 link_info->link_up = 0;
9452 if (link_up != link_info->link_up)
9453 bnxt_report_link(bp);
9454 } else {
9455 /* always link down if not required to update the link state */
9456 link_info->link_up = 0;
9457 }
9458 mutex_unlock(&bp->hwrm_cmd_lock);
286ef9d6 9459
c7e457f4 9460 if (!BNXT_PHY_CFG_ABLE(bp))
dac04907
MC
9461 return 0;
9462
c916062a
EP
9463 /* Check if any advertised speeds are no longer supported. The caller
9464 * holds the link_lock mutex, so we can modify link_info settings.
9465 */
9466 if (bnxt_support_dropped(link_info->advertising,
9467 link_info->support_auto_speeds)) {
286ef9d6 9468 link_info->advertising = link_info->support_auto_speeds;
d058426e 9469 support_changed = true;
286ef9d6 9470 }
d058426e
EP
9471 if (bnxt_support_dropped(link_info->advertising_pam4,
9472 link_info->support_pam4_auto_speeds)) {
9473 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9474 support_changed = true;
9475 }
9476 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9477 bnxt_hwrm_set_link_setting(bp, true, false);
c0c050c5
MC
9478 return 0;
9479}
9480
10289bec
MC
9481static void bnxt_get_port_module_status(struct bnxt *bp)
9482{
9483 struct bnxt_link_info *link_info = &bp->link_info;
9484 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9485 u8 module_status;
9486
9487 if (bnxt_update_link(bp, true))
9488 return;
9489
9490 module_status = link_info->module_status;
9491 switch (module_status) {
9492 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9493 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9494 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9495 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9496 bp->pf.port_id);
9497 if (bp->hwrm_spec_code >= 0x10201) {
9498 netdev_warn(bp->dev, "Module part number %s\n",
9499 resp->phy_vendor_partnumber);
9500 }
9501 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9502 netdev_warn(bp->dev, "TX is disabled\n");
9503 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9504 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9505 }
9506}
9507
c0c050c5
MC
9508static void
9509bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9510{
9511 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
9512 if (bp->hwrm_spec_code >= 0x10201)
9513 req->auto_pause =
9514 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
9515 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9516 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9517 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 9518 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
9519 req->enables |=
9520 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9521 } else {
9522 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9523 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9524 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9525 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9526 req->enables |=
9527 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
9528 if (bp->hwrm_spec_code >= 0x10201) {
9529 req->auto_pause = req->force_pause;
9530 req->enables |= cpu_to_le32(
9531 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9532 }
c0c050c5
MC
9533 }
9534}
9535
d058426e 9536static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
c0c050c5 9537{
d058426e
EP
9538 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9539 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9540 if (bp->link_info.advertising) {
9541 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9542 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9543 }
9544 if (bp->link_info.advertising_pam4) {
9545 req->enables |=
9546 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9547 req->auto_link_pam4_speed_mask =
9548 cpu_to_le16(bp->link_info.advertising_pam4);
9549 }
c0c050c5 9550 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
d058426e 9551 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
c0c050c5 9552 } else {
c0c050c5 9553 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
d058426e
EP
9554 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9555 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9556 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9557 } else {
9558 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9559 }
c0c050c5
MC
9560 }
9561
c0c050c5
MC
9562 /* tell chimp that the setting takes effect immediately */
9563 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9564}
9565
9566int bnxt_hwrm_set_pause(struct bnxt *bp)
9567{
9568 struct hwrm_port_phy_cfg_input req = {0};
9569 int rc;
9570
9571 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9572 bnxt_hwrm_set_pause_common(bp, &req);
9573
9574 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9575 bp->link_info.force_link_chng)
9576 bnxt_hwrm_set_link_common(bp, &req);
9577
9578 mutex_lock(&bp->hwrm_cmd_lock);
9579 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9580 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9581 /* since changing the pause setting doesn't trigger any link
9582 * change event, the driver needs to update the current pause
9583 * result upon successful return of the phy_cfg command
9584 */
9585 bp->link_info.pause =
9586 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9587 bp->link_info.auto_pause_setting = 0;
9588 if (!bp->link_info.force_link_chng)
9589 bnxt_report_link(bp);
9590 }
9591 bp->link_info.force_link_chng = false;
9592 mutex_unlock(&bp->hwrm_cmd_lock);
9593 return rc;
9594}
9595
939f7f0c
MC
9596static void bnxt_hwrm_set_eee(struct bnxt *bp,
9597 struct hwrm_port_phy_cfg_input *req)
9598{
9599 struct ethtool_eee *eee = &bp->eee;
9600
9601 if (eee->eee_enabled) {
9602 u16 eee_speeds;
9603 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9604
9605 if (eee->tx_lpi_enabled)
9606 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9607 else
9608 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9609
9610 req->flags |= cpu_to_le32(flags);
9611 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9612 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9613 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9614 } else {
9615 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9616 }
9617}
9618
9619int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5
MC
9620{
9621 struct hwrm_port_phy_cfg_input req = {0};
9622
9623 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9624 if (set_pause)
9625 bnxt_hwrm_set_pause_common(bp, &req);
9626
9627 bnxt_hwrm_set_link_common(bp, &req);
939f7f0c
MC
9628
9629 if (set_eee)
9630 bnxt_hwrm_set_eee(bp, &req);
c0c050c5
MC
9631 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9632}
9633
33f7d55f
MC
9634static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9635{
9636 struct hwrm_port_phy_cfg_input req = {0};
9637
567b2abe 9638 if (!BNXT_SINGLE_PF(bp))
33f7d55f
MC
9639 return 0;
9640
d5ca9905
MC
9641 if (pci_num_vf(bp->pdev) &&
9642 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
33f7d55f
MC
9643 return 0;
9644
9645 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
16d663a6 9646 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
33f7d55f
MC
9647 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9648}
9649
ec5d31e3
MC
9650static int bnxt_fw_init_one(struct bnxt *bp);
9651
b187e4ba
EP
9652static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9653{
9654#ifdef CONFIG_TEE_BNXT_FW
9655 int rc = tee_bnxt_fw_load();
9656
9657 if (rc)
9658 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9659
9660 return rc;
9661#else
9662 netdev_err(bp->dev, "OP-TEE not supported\n");
9663 return -ENODEV;
9664#endif
9665}
9666
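/* Poll the firmware health status register, retrying HWRM_VER_GET until the
 * firmware is no longer booting or recovering.  If the status register
 * reports FW_STATUS_REG_CRASHED_NO_MASTER, fall back to a firmware reset
 * through OP-TEE.
 */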
9667static int bnxt_try_recover_fw(struct bnxt *bp)
9668{
9669 if (bp->fw_health && bp->fw_health->status_reliable) {
d1cbd165
MC
9670 int retry = 0, rc;
9671 u32 sts;
9672
9673 mutex_lock(&bp->hwrm_cmd_lock);
9674 do {
d1cbd165 9675 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
861aae78 9676 rc = __bnxt_hwrm_ver_get(bp, true);
17e1be34
MC
9677 if (!BNXT_FW_IS_BOOTING(sts) &&
9678 !BNXT_FW_IS_RECOVERING(sts))
d1cbd165
MC
9679 break;
9680 retry++;
9681 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9682 mutex_unlock(&bp->hwrm_cmd_lock);
b187e4ba 9683
d1cbd165
MC
9684 if (!BNXT_FW_IS_HEALTHY(sts)) {
9685 netdev_err(bp->dev,
9686 "Firmware not responding, status: 0x%x\n",
9687 sts);
9688 rc = -ENODEV;
9689 }
b187e4ba
EP
9690 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9691 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9692 return bnxt_fw_reset_via_optee(bp);
9693 }
d1cbd165 9694 return rc;
b187e4ba
EP
9695 }
9696
9697 return -ENODEV;
9698}
9699
25e1acd6
MC
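/* Tell the firmware that the interface is going up or down using
 * HWRM_FUNC_DRV_IF_CHANGE.  On the way up, the response flags indicate
 * whether resources changed or a hot firmware reset completed while the
 * interface was down; if a reset completed, the driver re-initializes
 * firmware state and the interrupt mode, and if only resources changed,
 * the reserved ring counts are re-queried before the open continues.
 */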
9700static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9701{
9702 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9703 struct hwrm_func_drv_if_change_input req = {0};
20d7d1c5
EP
9704 bool fw_reset = !bp->irq_tbl;
9705 bool resc_reinit = false;
5d06eb5c 9706 int rc, retry = 0;
ec5d31e3 9707 u32 flags = 0;
25e1acd6
MC
9708
9709 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9710 return 0;
9711
9712 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9713 if (up)
9714 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9715 mutex_lock(&bp->hwrm_cmd_lock);
5d06eb5c
VV
9716 while (retry < BNXT_FW_IF_RETRY) {
9717 rc = _hwrm_send_message(bp, &req, sizeof(req),
9718 HWRM_CMD_TIMEOUT);
9719 if (rc != -EAGAIN)
9720 break;
9721
9722 msleep(50);
9723 retry++;
9724 }
ec5d31e3
MC
9725 if (!rc)
9726 flags = le32_to_cpu(resp->flags);
25e1acd6 9727 mutex_unlock(&bp->hwrm_cmd_lock);
5d06eb5c
VV
9728
9729 if (rc == -EAGAIN)
9730 return rc;
b187e4ba
EP
9731 if (rc && up) {
9732 rc = bnxt_try_recover_fw(bp);
9733 fw_reset = true;
9734 }
ec5d31e3
MC
9735 if (rc)
9736 return rc;
25e1acd6 9737
43a440c4
MC
9738 if (!up) {
9739 bnxt_inv_fw_health_reg(bp);
ec5d31e3 9740 return 0;
43a440c4 9741 }
25e1acd6 9742
ec5d31e3
MC
9743 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9744 resc_reinit = true;
9745 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9746 fw_reset = true;
43a440c4
MC
9747 else if (bp->fw_health && !bp->fw_health->status_reliable)
9748 bnxt_try_map_fw_health_reg(bp);
ec5d31e3 9749
3bc7d4a3
MC
9750 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9751 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
20d7d1c5 9752 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
3bc7d4a3
MC
9753 return -ENODEV;
9754 }
ec5d31e3
MC
9755 if (resc_reinit || fw_reset) {
9756 if (fw_reset) {
2924ad95 9757 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
f3a6d206
VV
9758 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9759 bnxt_ulp_stop(bp);
325f85f3
MC
9760 bnxt_free_ctx_mem(bp);
9761 kfree(bp->ctx);
9762 bp->ctx = NULL;
843d699d 9763 bnxt_dcb_free(bp);
ec5d31e3
MC
9764 rc = bnxt_fw_init_one(bp);
9765 if (rc) {
2924ad95 9766 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
ec5d31e3
MC
9767 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9768 return rc;
9769 }
9770 bnxt_clear_int_mode(bp);
9771 rc = bnxt_init_int_mode(bp);
9772 if (rc) {
2924ad95 9773 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
ec5d31e3
MC
9774 netdev_err(bp->dev, "init int mode failed\n");
9775 return rc;
9776 }
ec5d31e3
MC
9777 }
9778 if (BNXT_NEW_RM(bp)) {
9779 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9780
9781 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
15a7deb8
SB
9782 if (rc)
9783 netdev_err(bp->dev, "resc_qcaps failed\n");
9784
ec5d31e3
MC
9785 hw_resc->resv_cp_rings = 0;
9786 hw_resc->resv_stat_ctxs = 0;
9787 hw_resc->resv_irqs = 0;
9788 hw_resc->resv_tx_rings = 0;
9789 hw_resc->resv_rx_rings = 0;
9790 hw_resc->resv_hw_ring_grps = 0;
9791 hw_resc->resv_vnics = 0;
9792 if (!fw_reset) {
9793 bp->tx_nr_rings = 0;
9794 bp->rx_nr_rings = 0;
9795 }
9796 }
25e1acd6 9797 }
15a7deb8 9798 return rc;
25e1acd6
MC
9799}
9800
5ad2cbee
MC
9801static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9802{
9803 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9804 struct hwrm_port_led_qcaps_input req = {0};
9805 struct bnxt_pf_info *pf = &bp->pf;
9806 int rc;
9807
ba642ab7 9808 bp->num_leds = 0;
5ad2cbee
MC
9809 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9810 return 0;
9811
9812 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9813 req.port_id = cpu_to_le16(pf->port_id);
9814 mutex_lock(&bp->hwrm_cmd_lock);
9815 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9816 if (rc) {
9817 mutex_unlock(&bp->hwrm_cmd_lock);
9818 return rc;
9819 }
9820 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9821 int i;
9822
9823 bp->num_leds = resp->num_leds;
9824 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9825 bp->num_leds);
9826 for (i = 0; i < bp->num_leds; i++) {
9827 struct bnxt_led_info *led = &bp->leds[i];
9828 __le16 caps = led->led_state_caps;
9829
9830 if (!led->led_group_id ||
9831 !BNXT_LED_ALT_BLINK_CAP(caps)) {
9832 bp->num_leds = 0;
9833 break;
9834 }
9835 }
9836 }
9837 mutex_unlock(&bp->hwrm_cmd_lock);
9838 return 0;
9839}
9840
5282db6c
MC
9841int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9842{
9843 struct hwrm_wol_filter_alloc_input req = {0};
9844 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9845 int rc;
9846
9847 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9848 req.port_id = cpu_to_le16(bp->pf.port_id);
9849 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9850 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9851 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9852 mutex_lock(&bp->hwrm_cmd_lock);
9853 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9854 if (!rc)
9855 bp->wol_filter_id = resp->wol_filter_id;
9856 mutex_unlock(&bp->hwrm_cmd_lock);
9857 return rc;
9858}
9859
9860int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9861{
9862 struct hwrm_wol_filter_free_input req = {0};
5282db6c
MC
9863
9864 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9865 req.port_id = cpu_to_le16(bp->pf.port_id);
9866 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9867 req.wol_filter_id = bp->wol_filter_id;
9f90445c 9868 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5282db6c
MC
9869}
9870
c1ef146a
MC
9871static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9872{
9873 struct hwrm_wol_filter_qcfg_input req = {0};
9874 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9875 u16 next_handle = 0;
9876 int rc;
9877
9878 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9879 req.port_id = cpu_to_le16(bp->pf.port_id);
9880 req.handle = cpu_to_le16(handle);
9881 mutex_lock(&bp->hwrm_cmd_lock);
9882 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9883 if (!rc) {
9884 next_handle = le16_to_cpu(resp->next_handle);
9885 if (next_handle != 0) {
9886 if (resp->wol_type ==
9887 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9888 bp->wol = 1;
9889 bp->wol_filter_id = resp->wol_filter_id;
9890 }
9891 }
9892 }
9893 mutex_unlock(&bp->hwrm_cmd_lock);
9894 return next_handle;
9895}
9896
9897static void bnxt_get_wol_settings(struct bnxt *bp)
9898{
9899 u16 handle = 0;
9900
ba642ab7 9901 bp->wol = 0;
c1ef146a
MC
9902 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9903 return;
9904
9905 do {
9906 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9907 } while (handle && handle != 0xffff);
9908}
9909
cde49a42
VV
9910#ifdef CONFIG_BNXT_HWMON
9911static ssize_t bnxt_show_temp(struct device *dev,
9912 struct device_attribute *devattr, char *buf)
9913{
9914 struct hwrm_temp_monitor_query_input req = {0};
9915 struct hwrm_temp_monitor_query_output *resp;
9916 struct bnxt *bp = dev_get_drvdata(dev);
12cce90b 9917 u32 len = 0;
d69753fa 9918 int rc;
cde49a42
VV
9919
9920 resp = bp->hwrm_cmd_resp_addr;
9921 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9922 mutex_lock(&bp->hwrm_cmd_lock);
d69753fa
EP
9923 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9924 if (!rc)
12cce90b 9925 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
cde49a42 9926 mutex_unlock(&bp->hwrm_cmd_lock);
27537929
DC
9927 if (rc)
9928 return rc;
9929 return len;
cde49a42
VV
9930}
9931static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9932
9933static struct attribute *bnxt_attrs[] = {
9934 &sensor_dev_attr_temp1_input.dev_attr.attr,
9935 NULL
9936};
9937ATTRIBUTE_GROUPS(bnxt);
9938
9939static void bnxt_hwmon_close(struct bnxt *bp)
9940{
9941 if (bp->hwmon_dev) {
9942 hwmon_device_unregister(bp->hwmon_dev);
9943 bp->hwmon_dev = NULL;
9944 }
9945}
9946
9947static void bnxt_hwmon_open(struct bnxt *bp)
9948{
d69753fa 9949 struct hwrm_temp_monitor_query_input req = {0};
cde49a42 9950 struct pci_dev *pdev = bp->pdev;
d69753fa
EP
9951 int rc;
9952
9953 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9954 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9955 if (rc == -EACCES || rc == -EOPNOTSUPP) {
9956 bnxt_hwmon_close(bp);
9957 return;
9958 }
cde49a42 9959
ba642ab7
MC
9960 if (bp->hwmon_dev)
9961 return;
9962
cde49a42
VV
9963 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9964 DRV_MODULE_NAME, bp,
9965 bnxt_groups);
9966 if (IS_ERR(bp->hwmon_dev)) {
9967 bp->hwmon_dev = NULL;
9968 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9969 }
9970}
9971#else
9972static void bnxt_hwmon_close(struct bnxt *bp)
9973{
9974}
9975
9976static void bnxt_hwmon_open(struct bnxt *bp)
9977{
9978}
9979#endif
9980
939f7f0c
MC
9981static bool bnxt_eee_config_ok(struct bnxt *bp)
9982{
9983 struct ethtool_eee *eee = &bp->eee;
9984 struct bnxt_link_info *link_info = &bp->link_info;
9985
b0d28207 9986 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
939f7f0c
MC
9987 return true;
9988
9989 if (eee->eee_enabled) {
9990 u32 advertising =
9991 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9992
9993 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9994 eee->eee_enabled = 0;
9995 return false;
9996 }
9997 if (eee->advertised & ~advertising) {
9998 eee->advertised = advertising & eee->supported;
9999 return false;
10000 }
10001 }
10002 return true;
10003}
10004
c0c050c5
MC
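/* Refresh the link state from the firmware, then compare the requested
 * pause, speed and EEE settings against the current configuration and
 * issue HWRM_PORT_PHY_CFG only if something needs to change.
 */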
10005static int bnxt_update_phy_setting(struct bnxt *bp)
10006{
10007 int rc;
10008 bool update_link = false;
10009 bool update_pause = false;
939f7f0c 10010 bool update_eee = false;
c0c050c5
MC
10011 struct bnxt_link_info *link_info = &bp->link_info;
10012
10013 rc = bnxt_update_link(bp, true);
10014 if (rc) {
10015 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10016 rc);
10017 return rc;
10018 }
33dac24a
MC
10019 if (!BNXT_SINGLE_PF(bp))
10020 return 0;
10021
c0c050c5 10022 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
c9ee9516
MC
10023 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10024 link_info->req_flow_ctrl)
c0c050c5
MC
10025 update_pause = true;
10026 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10027 link_info->force_pause_setting != link_info->req_flow_ctrl)
10028 update_pause = true;
c0c050c5
MC
10029 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10030 if (BNXT_AUTO_MODE(link_info->auto_mode))
10031 update_link = true;
d058426e
EP
10032 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10033 link_info->req_link_speed != link_info->force_link_speed)
10034 update_link = true;
10035 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10036 link_info->req_link_speed != link_info->force_pam4_link_speed)
c0c050c5 10037 update_link = true;
de73018f
MC
10038 if (link_info->req_duplex != link_info->duplex_setting)
10039 update_link = true;
c0c050c5
MC
10040 } else {
10041 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10042 update_link = true;
d058426e
EP
10043 if (link_info->advertising != link_info->auto_link_speeds ||
10044 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
c0c050c5 10045 update_link = true;
c0c050c5
MC
10046 }
10047
16d663a6
MC
10048 /* The last close may have shut down the link, so we need to call
10049 * PHY_CFG to bring it back up.
10050 */
83d8f5e9 10051 if (!bp->link_info.link_up)
16d663a6
MC
10052 update_link = true;
10053
939f7f0c
MC
10054 if (!bnxt_eee_config_ok(bp))
10055 update_eee = true;
10056
c0c050c5 10057 if (update_link)
939f7f0c 10058 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
c0c050c5
MC
10059 else if (update_pause)
10060 rc = bnxt_hwrm_set_pause(bp);
10061 if (rc) {
10062 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10063 rc);
10064 return rc;
10065 }
10066
10067 return rc;
10068}
10069
11809490
JH
10070/* Common routine to pre-map certain register blocks to different GRC windows.
10071 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10072 * in the PF and 3 windows in the VF can be customized to map different
10073 * register blocks.
10074 */
10075static void bnxt_preset_reg_win(struct bnxt *bp)
10076{
10077 if (BNXT_PF(bp)) {
10078 /* CAG registers map to GRC window #4 */
10079 writel(BNXT_CAG_REG_BASE,
10080 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10081 }
10082}
10083
47558acd
MC
10084static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10085
6882c36c
EP
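/* After a fatal error (BNXT_STATE_ABORT_ERR) has been recorded, try a full
 * re-initialization of firmware state and interrupt mode so that a later
 * open can succeed.  Returns -EBUSY while a firmware reset is still in
 * progress and -ENODEV if the netdev has been unregistered.
 */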
10086static int bnxt_reinit_after_abort(struct bnxt *bp)
10087{
10088 int rc;
10089
10090 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10091 return -EBUSY;
10092
d20cd745
VV
10093 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10094 return -ENODEV;
10095
6882c36c
EP
10096 rc = bnxt_fw_init_one(bp);
10097 if (!rc) {
10098 bnxt_clear_int_mode(bp);
10099 rc = bnxt_init_int_mode(bp);
10100 if (!rc) {
10101 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10102 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10103 }
10104 }
10105 return rc;
10106}
10107
c0c050c5
MC
10108static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10109{
10110 int rc = 0;
10111
11809490 10112 bnxt_preset_reg_win(bp);
c0c050c5
MC
10113 netif_carrier_off(bp->dev);
10114 if (irq_re_init) {
47558acd
MC
10115 /* Reserve rings now if none were reserved at driver probe. */
10116 rc = bnxt_init_dflt_ring_mode(bp);
10117 if (rc) {
10118 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10119 return rc;
10120 }
c0c050c5 10121 }
1b3f0b75 10122 rc = bnxt_reserve_rings(bp, irq_re_init);
41e8d798
MC
10123 if (rc)
10124 return rc;
c0c050c5
MC
10125 if ((bp->flags & BNXT_FLAG_RFS) &&
10126 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10127 /* disable RFS if falling back to INTA */
10128 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10129 bp->flags &= ~BNXT_FLAG_RFS;
10130 }
10131
10132 rc = bnxt_alloc_mem(bp, irq_re_init);
10133 if (rc) {
10134 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10135 goto open_err_free_mem;
10136 }
10137
10138 if (irq_re_init) {
10139 bnxt_init_napi(bp);
10140 rc = bnxt_request_irq(bp);
10141 if (rc) {
10142 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
c58387ab 10143 goto open_err_irq;
c0c050c5
MC
10144 }
10145 }
10146
c0c050c5
MC
10147 rc = bnxt_init_nic(bp, irq_re_init);
10148 if (rc) {
10149 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
96ecdcc9 10150 goto open_err_irq;
c0c050c5
MC
10151 }
10152
96ecdcc9
JK
10153 bnxt_enable_napi(bp);
10154 bnxt_debug_dev_init(bp);
10155
c0c050c5 10156 if (link_re_init) {
e2dc9b6e 10157 mutex_lock(&bp->link_lock);
c0c050c5 10158 rc = bnxt_update_phy_setting(bp);
e2dc9b6e 10159 mutex_unlock(&bp->link_lock);
a1ef4a79 10160 if (rc) {
ba41d46f 10161 netdev_warn(bp->dev, "failed to update phy settings\n");
a1ef4a79
MC
10162 if (BNXT_SINGLE_PF(bp)) {
10163 bp->link_info.phy_retry = true;
10164 bp->link_info.phy_retry_expires =
10165 jiffies + 5 * HZ;
10166 }
10167 }
c0c050c5
MC
10168 }
10169
7cdd5fc3 10170 if (irq_re_init)
442a35a5 10171 udp_tunnel_nic_reset_ntf(bp->dev);
c0c050c5 10172
caefe526 10173 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
10174 bnxt_enable_int(bp);
10175 /* Enable TX queues */
10176 bnxt_tx_enable(bp);
10177 mod_timer(&bp->timer, jiffies + bp->current_interval);
10289bec
MC
10178 /* Poll the link status and check the SFP+ module status */
10179 bnxt_get_port_module_status(bp);
c0c050c5 10180
ee5c7fb3
SP
10181 /* VF-reps may need to be re-opened after the PF is re-opened */
10182 if (BNXT_PF(bp))
10183 bnxt_vf_reps_open(bp);
c0c050c5
MC
10184 return 0;
10185
c58387ab 10186open_err_irq:
c0c050c5
MC
10187 bnxt_del_napi(bp);
10188
10189open_err_free_mem:
10190 bnxt_free_skbs(bp);
10191 bnxt_free_irq(bp);
10192 bnxt_free_mem(bp, true);
10193 return rc;
10194}
10195
10196/* rtnl_lock held */
10197int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10198{
10199 int rc = 0;
10200
a1301f08
MC
10201 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10202 rc = -EIO;
10203 if (!rc)
10204 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
10205 if (rc) {
10206 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10207 dev_close(bp->dev);
10208 }
10209 return rc;
10210}
10211
f7dc1ea6
MC
10212/* rtnl_lock held, open the NIC halfway by allocating all resources, but
10213 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
10214 * self-tests.
10215 */
10216int bnxt_half_open_nic(struct bnxt *bp)
10217{
10218 int rc = 0;
10219
11a39259
SK
10220 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10221 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10222 rc = -ENODEV;
10223 goto half_open_err;
10224 }
10225
f7dc1ea6
MC
10226 rc = bnxt_alloc_mem(bp, false);
10227 if (rc) {
10228 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10229 goto half_open_err;
10230 }
10231 rc = bnxt_init_nic(bp, false);
10232 if (rc) {
10233 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10234 goto half_open_err;
10235 }
10236 return 0;
10237
10238half_open_err:
10239 bnxt_free_skbs(bp);
10240 bnxt_free_mem(bp, false);
10241 dev_close(bp->dev);
10242 return rc;
10243}
10244
10245/* rtnl_lock held, this call can only be made after a previous successful
10246 * call to bnxt_half_open_nic().
10247 */
10248void bnxt_half_close_nic(struct bnxt *bp)
10249{
10250 bnxt_hwrm_resource_free(bp, false, false);
10251 bnxt_free_skbs(bp);
10252 bnxt_free_mem(bp, false);
10253}
10254
c16d4ee0
MC
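/* Reconfigure the previously active VFs in firmware; used to restore
 * SR-IOV after a firmware reset.
 */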
10255static void bnxt_reenable_sriov(struct bnxt *bp)
10256{
10257 if (BNXT_PF(bp)) {
10258 struct bnxt_pf_info *pf = &bp->pf;
10259 int n = pf->active_vfs;
10260
10261 if (n)
10262 bnxt_cfg_hw_sriov(bp, &n, true);
10263 }
10264}
10265
c0c050c5
MC
10266static int bnxt_open(struct net_device *dev)
10267{
10268 struct bnxt *bp = netdev_priv(dev);
25e1acd6 10269 int rc;
c0c050c5 10270
ec5d31e3 10271 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
6882c36c
EP
10272 rc = bnxt_reinit_after_abort(bp);
10273 if (rc) {
10274 if (rc == -EBUSY)
10275 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10276 else
10277 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10278 return -ENODEV;
10279 }
ec5d31e3
MC
10280 }
10281
10282 rc = bnxt_hwrm_if_change(bp, true);
25e1acd6 10283 if (rc)
ec5d31e3 10284 return rc;
d7859afb 10285
ec5d31e3
MC
10286 rc = __bnxt_open_nic(bp, true, true);
10287 if (rc) {
25e1acd6 10288 bnxt_hwrm_if_change(bp, false);
ec5d31e3 10289 } else {
f3a6d206 10290 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
12de2ead 10291 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
f3a6d206 10292 bnxt_ulp_start(bp, 0);
12de2ead
MC
10293 bnxt_reenable_sriov(bp);
10294 }
ec5d31e3
MC
10295 }
10296 bnxt_hwmon_open(bp);
10297 }
cde49a42 10298
25e1acd6 10299 return rc;
c0c050c5
MC
10300}
10301
f9b76ebd
MC
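/* True while the sp_task or a stats read is still referencing the rings;
 * __bnxt_close_nic() polls this before tearing the rings down.
 */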
10302static bool bnxt_drv_busy(struct bnxt *bp)
10303{
10304 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10305 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10306}
10307
b8875ca3
MC
10308static void bnxt_get_ring_stats(struct bnxt *bp,
10309 struct rtnl_link_stats64 *stats);
10310
86e953db
MC
10311static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10312 bool link_re_init)
c0c050c5 10313{
ee5c7fb3
SP
10314 /* Close the VF-reps before closing PF */
10315 if (BNXT_PF(bp))
10316 bnxt_vf_reps_close(bp);
86e953db 10317
c0c050c5
MC
10318 /* Change device state to avoid TX queue wake-ups */
10319 bnxt_tx_disable(bp);
10320
caefe526 10321 clear_bit(BNXT_STATE_OPEN, &bp->state);
4cebdcec 10322 smp_mb__after_atomic();
f9b76ebd 10323 while (bnxt_drv_busy(bp))
4cebdcec 10324 msleep(20);
c0c050c5 10325
9d8bc097 10326 /* Flush rings and disable interrupts */
c0c050c5
MC
10327 bnxt_shutdown_nic(bp, irq_re_init);
10328
10329 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10330
cabfb09d 10331 bnxt_debug_dev_exit(bp);
c0c050c5 10332 bnxt_disable_napi(bp);
c0c050c5
MC
10333 del_timer_sync(&bp->timer);
10334 bnxt_free_skbs(bp);
10335
b8875ca3 10336 /* Save ring stats before shutdown */
b8056e84 10337 if (bp->bnapi && irq_re_init)
b8875ca3 10338 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
c0c050c5
MC
10339 if (irq_re_init) {
10340 bnxt_free_irq(bp);
10341 bnxt_del_napi(bp);
10342 }
10343 bnxt_free_mem(bp, irq_re_init);
86e953db
MC
10344}
10345
10346int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10347{
10348 int rc = 0;
10349
3bc7d4a3
MC
10350 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10351 /* If we get here, it means firmware reset is in progress
10352 * while we are trying to close. We can safely proceed with
10353 * the close because we are holding rtnl_lock(). Some firmware
10354 * messages may fail as we proceed to close. We set the
10355 * ABORT_ERR flag here so that the FW reset thread will later
10356 * abort when it gets the rtnl_lock() and sees the flag.
10357 */
10358 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10359 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10360 }
10361
86e953db
MC
10362#ifdef CONFIG_BNXT_SRIOV
10363 if (bp->sriov_cfg) {
10364 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10365 !bp->sriov_cfg,
10366 BNXT_SRIOV_CFG_WAIT_TMO);
10367 if (rc)
10368 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10369 }
10370#endif
10371 __bnxt_close_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
10372 return rc;
10373}
10374
10375static int bnxt_close(struct net_device *dev)
10376{
10377 struct bnxt *bp = netdev_priv(dev);
10378
cde49a42 10379 bnxt_hwmon_close(bp);
c0c050c5 10380 bnxt_close_nic(bp, true, true);
33f7d55f 10381 bnxt_hwrm_shutdown_link(bp);
25e1acd6 10382 bnxt_hwrm_if_change(bp, false);
c0c050c5
MC
10383 return 0;
10384}
10385
0ca12be9
VV
10386static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10387 u16 *val)
10388{
10389 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
10390 struct hwrm_port_phy_mdio_read_input req = {0};
10391 int rc;
10392
10393 if (bp->hwrm_spec_code < 0x10a00)
10394 return -EOPNOTSUPP;
10395
10396 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
10397 req.port_id = cpu_to_le16(bp->pf.port_id);
10398 req.phy_addr = phy_addr;
10399 req.reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 10400 if (mdio_phy_id_is_c45(phy_addr)) {
0ca12be9
VV
10401 req.cl45_mdio = 1;
10402 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10403 req.dev_addr = mdio_phy_id_devad(phy_addr);
10404 req.reg_addr = cpu_to_le16(reg);
10405 }
10406
10407 mutex_lock(&bp->hwrm_cmd_lock);
10408 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10409 if (!rc)
10410 *val = le16_to_cpu(resp->reg_data);
10411 mutex_unlock(&bp->hwrm_cmd_lock);
10412 return rc;
10413}
10414
10415static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10416 u16 val)
10417{
10418 struct hwrm_port_phy_mdio_write_input req = {0};
10419
10420 if (bp->hwrm_spec_code < 0x10a00)
10421 return -EOPNOTSUPP;
10422
10423 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
10424 req.port_id = cpu_to_le16(bp->pf.port_id);
10425 req.phy_addr = phy_addr;
10426 req.reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 10427 if (mdio_phy_id_is_c45(phy_addr)) {
0ca12be9
VV
10428 req.cl45_mdio = 1;
10429 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10430 req.dev_addr = mdio_phy_id_devad(phy_addr);
10431 req.reg_addr = cpu_to_le16(reg);
10432 }
10433 req.reg_data = cpu_to_le16(val);
10434
10435 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10436}
10437
c0c050c5
MC
10438/* rtnl_lock held */
10439static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10440{
0ca12be9
VV
10441 struct mii_ioctl_data *mdio = if_mii(ifr);
10442 struct bnxt *bp = netdev_priv(dev);
10443 int rc;
10444
c0c050c5
MC
10445 switch (cmd) {
10446 case SIOCGMIIPHY:
0ca12be9
VV
10447 mdio->phy_id = bp->link_info.phy_addr;
10448
df561f66 10449 fallthrough;
c0c050c5 10450 case SIOCGMIIREG: {
0ca12be9
VV
10451 u16 mii_regval = 0;
10452
c0c050c5
MC
10453 if (!netif_running(dev))
10454 return -EAGAIN;
10455
0ca12be9
VV
10456 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10457 &mii_regval);
10458 mdio->val_out = mii_regval;
10459 return rc;
c0c050c5
MC
10460 }
10461
10462 case SIOCSMIIREG:
10463 if (!netif_running(dev))
10464 return -EAGAIN;
10465
0ca12be9
VV
10466 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10467 mdio->val_in);
c0c050c5 10468
118612d5
MC
10469 case SIOCSHWTSTAMP:
10470 return bnxt_hwtstamp_set(dev, ifr);
10471
10472 case SIOCGHWTSTAMP:
10473 return bnxt_hwtstamp_get(dev, ifr);
10474
c0c050c5
MC
10475 default:
10476 /* do nothing */
10477 break;
10478 }
10479 return -EOPNOTSUPP;
10480}
10481
b8875ca3
MC
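/* Accumulate the software-maintained per-ring counters from every
 * completion ring into @stats.
 */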
10482static void bnxt_get_ring_stats(struct bnxt *bp,
10483 struct rtnl_link_stats64 *stats)
c0c050c5 10484{
b8875ca3 10485 int i;
c0c050c5 10486
c0c050c5
MC
10487 for (i = 0; i < bp->cp_nr_rings; i++) {
10488 struct bnxt_napi *bnapi = bp->bnapi[i];
10489 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
a0c30621 10490 u64 *sw = cpr->stats.sw_stats;
c0c050c5 10491
a0c30621
MC
10492 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10493 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10494 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
c0c050c5 10495
a0c30621
MC
10496 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10497 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10498 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
c0c050c5 10499
a0c30621
MC
10500 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10501 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10502 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
c0c050c5 10503
a0c30621
MC
10504 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10505 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10506 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
c0c050c5
MC
10507
10508 stats->rx_missed_errors +=
a0c30621 10509 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
c0c050c5 10510
a0c30621 10511 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
c0c050c5 10512
a0c30621 10513 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
c0c050c5 10514 }
b8875ca3
MC
10515}
10516
10517static void bnxt_add_prev_stats(struct bnxt *bp,
10518 struct rtnl_link_stats64 *stats)
10519{
10520 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10521
10522 stats->rx_packets += prev_stats->rx_packets;
10523 stats->tx_packets += prev_stats->tx_packets;
10524 stats->rx_bytes += prev_stats->rx_bytes;
10525 stats->tx_bytes += prev_stats->tx_bytes;
10526 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10527 stats->multicast += prev_stats->multicast;
10528 stats->tx_dropped += prev_stats->tx_dropped;
10529}
10530
10531static void
10532bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10533{
10534 struct bnxt *bp = netdev_priv(dev);
10535
10536 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10537 /* Make sure bnxt_close_nic() sees that we are reading stats before
10538 * we check the BNXT_STATE_OPEN flag.
10539 */
10540 smp_mb__after_atomic();
10541 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10542 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10543 *stats = bp->net_stats_prev;
10544 return;
10545 }
10546
10547 bnxt_get_ring_stats(bp, stats);
10548 bnxt_add_prev_stats(bp, stats);
c0c050c5 10549
9947f83f 10550 if (bp->flags & BNXT_FLAG_PORT_STATS) {
a0c30621
MC
10551 u64 *rx = bp->port_stats.sw_stats;
10552 u64 *tx = bp->port_stats.sw_stats +
10553 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10554
10555 stats->rx_crc_errors =
10556 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10557 stats->rx_frame_errors =
10558 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10559 stats->rx_length_errors =
10560 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10561 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10562 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10563 stats->rx_errors =
10564 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10565 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10566 stats->collisions =
10567 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10568 stats->tx_fifo_errors =
10569 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10570 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
9947f83f 10571 }
f9b76ebd 10572 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
c0c050c5
MC
10573}
10574
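/* Copy the netdev multicast list into the default VNIC and report whether
 * it changed.  If the list exceeds BNXT_MAX_MC_ADDRS, request ALL_MCAST
 * mode instead.
 */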
10575static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10576{
10577 struct net_device *dev = bp->dev;
10578 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10579 struct netdev_hw_addr *ha;
10580 u8 *haddr;
10581 int mc_count = 0;
10582 bool update = false;
10583 int off = 0;
10584
10585 netdev_for_each_mc_addr(ha, dev) {
10586 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10587 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10588 vnic->mc_list_count = 0;
10589 return false;
10590 }
10591 haddr = ha->addr;
10592 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10593 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10594 update = true;
10595 }
10596 off += ETH_ALEN;
10597 mc_count++;
10598 }
10599 if (mc_count)
10600 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10601
10602 if (mc_count != vnic->mc_list_count) {
10603 vnic->mc_list_count = mc_count;
10604 update = true;
10605 }
10606 return update;
10607}
10608
10609static bool bnxt_uc_list_updated(struct bnxt *bp)
10610{
10611 struct net_device *dev = bp->dev;
10612 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10613 struct netdev_hw_addr *ha;
10614 int off = 0;
10615
10616 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10617 return true;
10618
10619 netdev_for_each_uc_addr(ha, dev) {
10620 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10621 return true;
10622
10623 off += ETH_ALEN;
10624 }
10625 return false;
10626}
10627
10628static void bnxt_set_rx_mode(struct net_device *dev)
10629{
10630 struct bnxt *bp = netdev_priv(dev);
268d0895 10631 struct bnxt_vnic_info *vnic;
c0c050c5
MC
10632 bool mc_update = false;
10633 bool uc_update;
268d0895 10634 u32 mask;
c0c050c5 10635
268d0895 10636 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
10637 return;
10638
268d0895
MC
10639 vnic = &bp->vnic_info[0];
10640 mask = vnic->rx_mask;
c0c050c5
MC
10641 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10642 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
30e33848
MC
10643 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10644 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
c0c050c5 10645
dd85fc0a 10646 if (dev->flags & IFF_PROMISC)
c0c050c5
MC
10647 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10648
10649 uc_update = bnxt_uc_list_updated(bp);
10650
30e33848
MC
10651 if (dev->flags & IFF_BROADCAST)
10652 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5
MC
10653 if (dev->flags & IFF_ALLMULTI) {
10654 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10655 vnic->mc_list_count = 0;
10656 } else {
10657 mc_update = bnxt_mc_list_updated(bp, &mask);
10658 }
10659
10660 if (mask != vnic->rx_mask || uc_update || mc_update) {
10661 vnic->rx_mask = mask;
10662
10663 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
c213eae8 10664 bnxt_queue_sp_work(bp);
c0c050c5
MC
10665 }
10666}
10667
b664f008 10668static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
10669{
10670 struct net_device *dev = bp->dev;
10671 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10672 struct netdev_hw_addr *ha;
10673 int i, off = 0, rc;
10674 bool uc_update;
10675
10676 netif_addr_lock_bh(dev);
10677 uc_update = bnxt_uc_list_updated(bp);
10678 netif_addr_unlock_bh(dev);
10679
10680 if (!uc_update)
10681 goto skip_uc;
10682
10683 mutex_lock(&bp->hwrm_cmd_lock);
10684 for (i = 1; i < vnic->uc_filter_count; i++) {
10685 struct hwrm_cfa_l2_filter_free_input req = {0};
10686
10687 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10688 -1);
10689
10690 req.l2_filter_id = vnic->fw_l2_filter_id[i];
10691
10692 rc = _hwrm_send_message(bp, &req, sizeof(req),
10693 HWRM_CMD_TIMEOUT);
10694 }
10695 mutex_unlock(&bp->hwrm_cmd_lock);
10696
10697 vnic->uc_filter_count = 1;
10698
10699 netif_addr_lock_bh(dev);
10700 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10701 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10702 } else {
10703 netdev_for_each_uc_addr(ha, dev) {
10704 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10705 off += ETH_ALEN;
10706 vnic->uc_filter_count++;
10707 }
10708 }
10709 netif_addr_unlock_bh(dev);
10710
10711 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10712 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10713 if (rc) {
10714 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10715 rc);
10716 vnic->uc_filter_count = i;
b664f008 10717 return rc;
c0c050c5
MC
10718 }
10719 }
10720
10721skip_uc:
dd85fc0a
EP
10722 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10723 !bnxt_promisc_ok(bp))
10724 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
c0c050c5 10725 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
b4e30e8e
MC
10726 if (rc && vnic->mc_list_count) {
10727 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10728 rc);
10729 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10730 vnic->mc_list_count = 0;
10731 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10732 }
c0c050c5 10733 if (rc)
b4e30e8e 10734 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
c0c050c5 10735 rc);
b664f008
MC
10736
10737 return rc;
c0c050c5
MC
10738}
10739
2773dfb2
MC
10740static bool bnxt_can_reserve_rings(struct bnxt *bp)
10741{
10742#ifdef CONFIG_BNXT_SRIOV
f1ca94de 10743 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
2773dfb2
MC
10744 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10745
10746 /* No minimum rings were provisioned by the PF. Don't
10747 * reserve rings by default when device is down.
10748 */
10749 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10750 return true;
10751
10752 if (!netif_running(bp->dev))
10753 return false;
10754 }
10755#endif
10756 return true;
10757}
10758
8079e8f1
MC
10759 /* If the chip and firmware support RFS */
10760static bool bnxt_rfs_supported(struct bnxt *bp)
10761{
e969ae5b 10762 if (bp->flags & BNXT_FLAG_CHIP_P5) {
41136ab3 10763 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
e969ae5b 10764 return true;
41e8d798 10765 return false;
e969ae5b 10766 }
8079e8f1
MC
10767 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10768 return true;
ae10ae74
MC
10769 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10770 return true;
8079e8f1
MC
10771 return false;
10772}
10773
10774/* If runtime conditions support RFS */
2bcfa6f6
MC
10775static bool bnxt_rfs_capable(struct bnxt *bp)
10776{
10777#ifdef CONFIG_RFS_ACCEL
8079e8f1 10778 int vnics, max_vnics, max_rss_ctxs;
2bcfa6f6 10779
41e8d798 10780 if (bp->flags & BNXT_FLAG_CHIP_P5)
ac33906c 10781 return bnxt_rfs_supported(bp);
2773dfb2 10782 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
2bcfa6f6
MC
10783 return false;
10784
10785 vnics = 1 + bp->rx_nr_rings;
8079e8f1
MC
10786 max_vnics = bnxt_get_max_func_vnics(bp);
10787 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
ae10ae74
MC
10788
10789 /* RSS contexts not a limiting factor */
10790 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10791 max_rss_ctxs = max_vnics;
8079e8f1 10792 if (vnics > max_vnics || vnics > max_rss_ctxs) {
6a1eef5b
MC
10793 if (bp->rx_nr_rings > 1)
10794 netdev_warn(bp->dev,
10795 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10796 min(max_rss_ctxs - 1, max_vnics - 1));
2bcfa6f6 10797 return false;
a2304909 10798 }
2bcfa6f6 10799
f1ca94de 10800 if (!BNXT_NEW_RM(bp))
6a1eef5b
MC
10801 return true;
10802
10803 if (vnics == bp->hw_resc.resv_vnics)
10804 return true;
10805
780baad4 10806 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
6a1eef5b
MC
10807 if (vnics <= bp->hw_resc.resv_vnics)
10808 return true;
10809
10810 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
780baad4 10811 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
6a1eef5b 10812 return false;
2bcfa6f6
MC
10813#else
10814 return false;
10815#endif
10816}
10817
c0c050c5
MC
10818static netdev_features_t bnxt_fix_features(struct net_device *dev,
10819 netdev_features_t features)
10820{
2bcfa6f6 10821 struct bnxt *bp = netdev_priv(dev);
c72cb303 10822 netdev_features_t vlan_features;
2bcfa6f6 10823
a2304909 10824 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
2bcfa6f6 10825 features &= ~NETIF_F_NTUPLE;
5a9f6b23 10826
1054aee8
MC
10827 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10828 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10829
10830 if (!(features & NETIF_F_GRO))
10831 features &= ~NETIF_F_GRO_HW;
10832
10833 if (features & NETIF_F_GRO_HW)
10834 features &= ~NETIF_F_LRO;
10835
5a9f6b23
MC
10836 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10837 * turned on or off together.
10838 */
a196e96b
EP
10839 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10840 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10841 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10842 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
c72cb303 10843 else if (vlan_features)
a196e96b 10844 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
5a9f6b23 10845 }
cf6645f8 10846#ifdef CONFIG_BNXT_SRIOV
a196e96b
EP
10847 if (BNXT_VF(bp) && bp->vf.vlan)
10848 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
cf6645f8 10849#endif
c0c050c5
MC
10850 return features;
10851}
10852
10853static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10854{
10855 struct bnxt *bp = netdev_priv(dev);
10856 u32 flags = bp->flags;
10857 u32 changes;
10858 int rc = 0;
10859 bool re_init = false;
10860 bool update_tpa = false;
10861
10862 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
1054aee8 10863 if (features & NETIF_F_GRO_HW)
c0c050c5 10864 flags |= BNXT_FLAG_GRO;
1054aee8 10865 else if (features & NETIF_F_LRO)
c0c050c5
MC
10866 flags |= BNXT_FLAG_LRO;
10867
bdbd1eb5
MC
10868 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10869 flags &= ~BNXT_FLAG_TPA;
10870
a196e96b 10871 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
c0c050c5
MC
10872 flags |= BNXT_FLAG_STRIP_VLAN;
10873
10874 if (features & NETIF_F_NTUPLE)
10875 flags |= BNXT_FLAG_RFS;
10876
10877 changes = flags ^ bp->flags;
10878 if (changes & BNXT_FLAG_TPA) {
10879 update_tpa = true;
10880 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
f45b7b78
MC
10881 (flags & BNXT_FLAG_TPA) == 0 ||
10882 (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
10883 re_init = true;
10884 }
10885
10886 if (changes & ~BNXT_FLAG_TPA)
10887 re_init = true;
10888
10889 if (flags != bp->flags) {
10890 u32 old_flags = bp->flags;
10891
2bcfa6f6 10892 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
f45b7b78 10893 bp->flags = flags;
c0c050c5
MC
10894 if (update_tpa)
10895 bnxt_set_ring_params(bp);
10896 return rc;
10897 }
10898
10899 if (re_init) {
10900 bnxt_close_nic(bp, false, false);
f45b7b78 10901 bp->flags = flags;
c0c050c5
MC
10902 if (update_tpa)
10903 bnxt_set_ring_params(bp);
10904
10905 return bnxt_open_nic(bp, false, false);
10906 }
10907 if (update_tpa) {
f45b7b78 10908 bp->flags = flags;
c0c050c5
MC
10909 rc = bnxt_set_tpa(bp,
10910 (flags & BNXT_FLAG_TPA) ?
10911 true : false);
10912 if (rc)
10913 bp->flags = old_flags;
10914 }
10915 }
10916 return rc;
10917}
10918
aa473d6c
MC
10919static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
10920 u8 **nextp)
10921{
10922 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
10923 int hdr_count = 0;
10924 u8 *nexthdr;
10925 int start;
10926
10927 /* Check that there are at most 2 IPv6 extension headers, no
10928 * fragment header, and each is <= 64 bytes.
10929 */
10930 start = nw_off + sizeof(*ip6h);
10931 nexthdr = &ip6h->nexthdr;
10932 while (ipv6_ext_hdr(*nexthdr)) {
10933 struct ipv6_opt_hdr *hp;
10934 int hdrlen;
10935
10936 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
10937 *nexthdr == NEXTHDR_FRAGMENT)
10938 return false;
10939 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
10940 skb_headlen(skb), NULL);
10941 if (!hp)
10942 return false;
10943 if (*nexthdr == NEXTHDR_AUTH)
10944 hdrlen = ipv6_authlen(hp);
10945 else
10946 hdrlen = ipv6_optlen(hp);
10947
10948 if (hdrlen > 64)
10949 return false;
10950 nexthdr = &hp->nexthdr;
10951 start += hdrlen;
10952 hdr_count++;
10953 }
10954 if (nextp) {
10955 /* Caller will check inner protocol */
10956 if (skb->encapsulation) {
10957 *nextp = nexthdr;
10958 return true;
10959 }
10960 *nextp = NULL;
10961 }
10962 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
10963 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
10964}
10965
10966/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
10967static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
10968{
10969 struct udphdr *uh = udp_hdr(skb);
10970 __be16 udp_port = uh->dest;
10971
10972 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
10973 return false;
10974 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
10975 struct ethhdr *eh = inner_eth_hdr(skb);
10976
10977 switch (eh->h_proto) {
10978 case htons(ETH_P_IP):
10979 return true;
10980 case htons(ETH_P_IPV6):
10981 return bnxt_exthdr_check(bp, skb,
10982 skb_inner_network_offset(skb),
10983 NULL);
10984 }
10985 }
10986 return false;
10987}
10988
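/* Decide whether TX offloads can be kept for a tunneled packet based on
 * the outer L4 protocol and, where applicable, the inner headers.
 */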
10989static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
10990{
10991 switch (l4_proto) {
10992 case IPPROTO_UDP:
10993 return bnxt_udp_tunl_check(bp, skb);
10994 case IPPROTO_IPIP:
10995 return true;
10996 case IPPROTO_GRE: {
10997 switch (skb->inner_protocol) {
10998 default:
10999 return false;
11000 case htons(ETH_P_IP):
11001 return true;
11002 case htons(ETH_P_IPV6):
11003 fallthrough;
11004 }
11005 }
11006 case IPPROTO_IPV6:
11007 /* Check ext headers of inner ipv6 */
11008 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11009 NULL);
11010 }
11011 return false;
11012}
11013
1698d600
MC
11014static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11015 struct net_device *dev,
11016 netdev_features_t features)
11017{
aa473d6c
MC
11018 struct bnxt *bp = netdev_priv(dev);
11019 u8 *l4_proto;
1698d600
MC
11020
11021 features = vlan_features_check(skb, features);
1698d600
MC
11022 switch (vlan_get_protocol(skb)) {
11023 case htons(ETH_P_IP):
aa473d6c
MC
11024 if (!skb->encapsulation)
11025 return features;
11026 l4_proto = &ip_hdr(skb)->protocol;
11027 if (bnxt_tunl_check(bp, skb, *l4_proto))
11028 return features;
1698d600
MC
11029 break;
11030 case htons(ETH_P_IPV6):
aa473d6c
MC
11031 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11032 &l4_proto))
11033 break;
11034 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11035 return features;
1698d600 11036 break;
1698d600 11037 }
1698d600
MC
11038 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11039}
11040
b5d600b0
VV
11041int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11042 u32 *reg_buf)
11043{
11044 struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
11045 struct hwrm_dbg_read_direct_input req = {0};
11046 __le32 *dbg_reg_buf;
11047 dma_addr_t mapping;
11048 int rc, i;
11049
11050 dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
11051 &mapping, GFP_KERNEL);
11052 if (!dbg_reg_buf)
11053 return -ENOMEM;
11054 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
11055 req.host_dest_addr = cpu_to_le64(mapping);
11056 req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11057 req.read_len32 = cpu_to_le32(num_words);
11058 mutex_lock(&bp->hwrm_cmd_lock);
11059 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11060 if (rc || resp->error_code) {
11061 rc = -EIO;
11062 goto dbg_rd_reg_exit;
11063 }
11064 for (i = 0; i < num_words; i++)
11065 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11066
11067dbg_rd_reg_exit:
11068 mutex_unlock(&bp->hwrm_cmd_lock);
11069 dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
11070 return rc;
11071}
11072
ffd77621
MC
11073static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11074 u32 ring_id, u32 *prod, u32 *cons)
11075{
11076 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
11077 struct hwrm_dbg_ring_info_get_input req = {0};
11078 int rc;
11079
11080 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
11081 req.ring_type = ring_type;
11082 req.fw_ring_id = cpu_to_le32(ring_id);
11083 mutex_lock(&bp->hwrm_cmd_lock);
11084 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11085 if (!rc) {
11086 *prod = le32_to_cpu(resp->producer_index);
11087 *cons = le32_to_cpu(resp->consumer_index);
11088 }
11089 mutex_unlock(&bp->hwrm_cmd_lock);
11090 return rc;
11091}
11092
9f554590
MC
11093static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11094{
b6ab4b01 11095 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9f554590
MC
11096 int i = bnapi->index;
11097
3b2b7d9d
MC
11098 if (!txr)
11099 return;
11100
9f554590
MC
11101 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11102 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11103 txr->tx_cons);
11104}
11105
11106static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11107{
b6ab4b01 11108 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9f554590
MC
11109 int i = bnapi->index;
11110
3b2b7d9d
MC
11111 if (!rxr)
11112 return;
11113
9f554590
MC
11114 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11115 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11116 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11117 rxr->rx_sw_agg_prod);
11118}
11119
11120static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11121{
11122 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11123 int i = bnapi->index;
11124
11125 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11126 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11127}
11128
c0c050c5
MC
11129static void bnxt_dbg_dump_states(struct bnxt *bp)
11130{
11131 int i;
11132 struct bnxt_napi *bnapi;
c0c050c5
MC
11133
11134 for (i = 0; i < bp->cp_nr_rings; i++) {
11135 bnapi = bp->bnapi[i];
c0c050c5 11136 if (netif_msg_drv(bp)) {
9f554590
MC
11137 bnxt_dump_tx_sw_state(bnapi);
11138 bnxt_dump_rx_sw_state(bnapi);
11139 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
11140 }
11141 }
11142}
11143
8fbf58e1
MC
11144static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11145{
11146 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11147 struct hwrm_ring_reset_input req = {0};
11148 struct bnxt_napi *bnapi = rxr->bnapi;
11149 struct bnxt_cp_ring_info *cpr;
11150 u16 cp_ring_id;
11151
11152 cpr = &bnapi->cp_ring;
11153 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11154 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
11155 req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11156 req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11157 return hwrm_send_message_silent(bp, &req, sizeof(req),
11158 HWRM_CMD_TIMEOUT);
11159}
11160
6988bd92 11161static void bnxt_reset_task(struct bnxt *bp, bool silent)
c0c050c5 11162{
6988bd92
MC
11163 if (!silent)
11164 bnxt_dbg_dump_states(bp);
028de140 11165 if (netif_running(bp->dev)) {
b386cd36
MC
11166 int rc;
11167
aa46dfff
VV
11168 if (silent) {
11169 bnxt_close_nic(bp, false, false);
11170 bnxt_open_nic(bp, false, false);
11171 } else {
b386cd36 11172 bnxt_ulp_stop(bp);
aa46dfff
VV
11173 bnxt_close_nic(bp, true, false);
11174 rc = bnxt_open_nic(bp, true, false);
11175 bnxt_ulp_start(bp, rc);
11176 }
028de140 11177 }
c0c050c5
MC
11178}
11179
0290bd29 11180static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
c0c050c5
MC
11181{
11182 struct bnxt *bp = netdev_priv(dev);
11183
11184 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11185 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 11186 bnxt_queue_sp_work(bp);
c0c050c5
MC
11187}
11188
acfb50e4
VV
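/* Periodic health poll driven by bnxt_timer(): if the firmware heartbeat
 * stops advancing or the reset counter changes unexpectedly, schedule a
 * firmware exception event.
 */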
11189static void bnxt_fw_health_check(struct bnxt *bp)
11190{
11191 struct bnxt_fw_health *fw_health = bp->fw_health;
11192 u32 val;
11193
0797c10d 11194 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
acfb50e4
VV
11195 return;
11196
11197 if (fw_health->tmr_counter) {
11198 fw_health->tmr_counter--;
11199 return;
11200 }
11201
11202 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11203 if (val == fw_health->last_fw_heartbeat)
11204 goto fw_reset;
11205
11206 fw_health->last_fw_heartbeat = val;
11207
11208 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11209 if (val != fw_health->last_fw_reset_cnt)
11210 goto fw_reset;
11211
11212 fw_health->tmr_counter = fw_health->tmr_multiplier;
11213 return;
11214
11215fw_reset:
11216 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11217 bnxt_queue_sp_work(bp);
11218}
11219
e99e88a9 11220static void bnxt_timer(struct timer_list *t)
c0c050c5 11221{
e99e88a9 11222 struct bnxt *bp = from_timer(bp, t, timer);
c0c050c5
MC
11223 struct net_device *dev = bp->dev;
11224
e0009404 11225 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
11226 return;
11227
11228 if (atomic_read(&bp->intr_sem) != 0)
11229 goto bnxt_restart_timer;
11230
acfb50e4
VV
11231 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11232 bnxt_fw_health_check(bp);
11233
fea6b333 11234 if (bp->link_info.link_up && bp->stats_coal_ticks) {
3bdf56c4 11235 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
c213eae8 11236 bnxt_queue_sp_work(bp);
3bdf56c4 11237 }
5a84acbe
SP
11238
11239 if (bnxt_tc_flower_enabled(bp)) {
11240 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11241 bnxt_queue_sp_work(bp);
11242 }
a1ef4a79 11243
87d67f59
PC
11244#ifdef CONFIG_RFS_ACCEL
11245 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11246 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11247 bnxt_queue_sp_work(bp);
11248 }
11249#endif /*CONFIG_RFS_ACCEL*/
11250
a1ef4a79
MC
11251 if (bp->link_info.phy_retry) {
11252 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
acda6180 11253 bp->link_info.phy_retry = false;
a1ef4a79
MC
11254 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11255 } else {
11256 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11257 bnxt_queue_sp_work(bp);
11258 }
11259 }
ffd77621 11260
5313845f
MC
11261 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11262 netif_carrier_ok(dev)) {
ffd77621
MC
11263 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11264 bnxt_queue_sp_work(bp);
11265 }
c0c050c5
MC
11266bnxt_restart_timer:
11267 mod_timer(&bp->timer, jiffies + bp->current_interval);
11268}
11269
a551ee94 11270static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6988bd92 11271{
a551ee94
MC
11272 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11273 * set. If the device is being closed, bnxt_close() may be holding
6988bd92
MC
 11274 * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
 11275 * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
11276 */
11277 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11278 rtnl_lock();
a551ee94
MC
11279}
11280
11281static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11282{
6988bd92
MC
11283 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11284 rtnl_unlock();
11285}
11286
a551ee94
MC
11287/* Only called from bnxt_sp_task() */
11288static void bnxt_reset(struct bnxt *bp, bool silent)
11289{
11290 bnxt_rtnl_lock_sp(bp);
11291 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11292 bnxt_reset_task(bp, silent);
11293 bnxt_rtnl_unlock_sp(bp);
11294}
11295
8fbf58e1
MC
11296/* Only called from bnxt_sp_task() */
11297static void bnxt_rx_ring_reset(struct bnxt *bp)
11298{
11299 int i;
11300
11301 bnxt_rtnl_lock_sp(bp);
11302 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11303 bnxt_rtnl_unlock_sp(bp);
11304 return;
11305 }
11306 /* Disable and flush TPA before resetting the RX ring */
11307 if (bp->flags & BNXT_FLAG_TPA)
11308 bnxt_set_tpa(bp, false);
11309 for (i = 0; i < bp->rx_nr_rings; i++) {
11310 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11311 struct bnxt_cp_ring_info *cpr;
11312 int rc;
11313
11314 if (!rxr->bnapi->in_reset)
11315 continue;
11316
11317 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11318 if (rc) {
11319 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11320 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11321 else
11322 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11323 rc);
8fb35cd3 11324 bnxt_reset_task(bp, true);
8fbf58e1
MC
11325 break;
11326 }
11327 bnxt_free_one_rx_ring_skbs(bp, i);
11328 rxr->rx_prod = 0;
11329 rxr->rx_agg_prod = 0;
11330 rxr->rx_sw_agg_prod = 0;
11331 rxr->rx_next_cons = 0;
11332 rxr->bnapi->in_reset = false;
11333 bnxt_alloc_one_rx_ring(bp, i);
11334 cpr = &rxr->bnapi->cp_ring;
8a27d4b9 11335 cpr->sw_stats.rx.rx_resets++;
8fbf58e1
MC
11336 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11337 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11338 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11339 }
11340 if (bp->flags & BNXT_FLAG_TPA)
11341 bnxt_set_tpa(bp, true);
11342 bnxt_rtnl_unlock_sp(bp);
11343}
11344
230d1f0d
MC
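/* Tear the device down ahead of a firmware reset: close the NIC, free the
 * VF representors, clear the interrupt mode, unregister from firmware and
 * release context memory.
 */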
11345static void bnxt_fw_reset_close(struct bnxt *bp)
11346{
f3a6d206 11347 bnxt_ulp_stop(bp);
4f036b2e
MC
11348 /* When firmware is in fatal state, quiesce device and disable
11349 * bus master to prevent any potential bad DMAs before freeing
11350 * kernel memory.
d4073028 11351 */
4f036b2e 11352 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
dab62e7c
MC
11353 u16 val = 0;
11354
11355 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11356 if (val == 0xffff)
11357 bp->fw_reset_min_dsecs = 0;
4f036b2e
MC
11358 bnxt_tx_disable(bp);
11359 bnxt_disable_napi(bp);
11360 bnxt_disable_int_sync(bp);
11361 bnxt_free_irq(bp);
11362 bnxt_clear_int_mode(bp);
d4073028 11363 pci_disable_device(bp->pdev);
4f036b2e 11364 }
230d1f0d 11365 __bnxt_close_nic(bp, true, false);
ac797ced 11366 bnxt_vf_reps_free(bp);
230d1f0d
MC
11367 bnxt_clear_int_mode(bp);
11368 bnxt_hwrm_func_drv_unrgtr(bp);
d4073028
VV
11369 if (pci_is_enabled(bp->pdev))
11370 pci_disable_device(bp->pdev);
230d1f0d
MC
11371 bnxt_free_ctx_mem(bp);
11372 kfree(bp->ctx);
11373 bp->ctx = NULL;
11374}
11375
acfb50e4
VV
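/* Report firmware as OK only if the heartbeat register has advanced and
 * the reset counter has changed since the last poll.
 */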
11376static bool is_bnxt_fw_ok(struct bnxt *bp)
11377{
11378 struct bnxt_fw_health *fw_health = bp->fw_health;
11379 bool no_heartbeat = false, has_reset = false;
11380 u32 val;
11381
11382 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11383 if (val == fw_health->last_fw_heartbeat)
11384 no_heartbeat = true;
11385
11386 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11387 if (val != fw_health->last_fw_reset_cnt)
11388 has_reset = true;
11389
11390 if (!no_heartbeat && has_reset)
11391 return true;
11392
11393 return false;
11394}
11395
d1db9e16
MC
11396/* rtnl_lock is acquired before calling this function */
11397static void bnxt_force_fw_reset(struct bnxt *bp)
11398{
11399 struct bnxt_fw_health *fw_health = bp->fw_health;
11400 u32 wait_dsecs;
11401
11402 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11403 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11404 return;
11405
11406 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11407 bnxt_fw_reset_close(bp);
11408 wait_dsecs = fw_health->master_func_wait_dsecs;
11409 if (fw_health->master) {
11410 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11411 wait_dsecs = 0;
11412 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11413 } else {
11414 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11415 wait_dsecs = fw_health->normal_func_wait_dsecs;
11416 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11417 }
4037eb71
VV
11418
11419 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
d1db9e16
MC
11420 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11421 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11422}
11423
11424void bnxt_fw_exception(struct bnxt *bp)
11425{
a2b31e27 11426 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
d1db9e16
MC
11427 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11428 bnxt_rtnl_lock_sp(bp);
11429 bnxt_force_fw_reset(bp);
11430 bnxt_rtnl_unlock_sp(bp);
11431}
11432
e72cb7d6
MC
11433/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11434 * < 0 on error.
11435 */
11436static int bnxt_get_registered_vfs(struct bnxt *bp)
230d1f0d 11437{
e72cb7d6 11438#ifdef CONFIG_BNXT_SRIOV
230d1f0d
MC
11439 int rc;
11440
e72cb7d6
MC
11441 if (!BNXT_PF(bp))
11442 return 0;
11443
11444 rc = bnxt_hwrm_func_qcfg(bp);
11445 if (rc) {
11446 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11447 return rc;
11448 }
11449 if (bp->pf.registered_vfs)
11450 return bp->pf.registered_vfs;
11451 if (bp->sriov_cfg)
11452 return 1;
11453#endif
11454 return 0;
11455}
11456
11457void bnxt_fw_reset(struct bnxt *bp)
11458{
230d1f0d
MC
11459 bnxt_rtnl_lock_sp(bp);
11460 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11461 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
4037eb71 11462 int n = 0, tmo;
e72cb7d6 11463
230d1f0d 11464 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
e72cb7d6
MC
11465 if (bp->pf.active_vfs &&
11466 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11467 n = bnxt_get_registered_vfs(bp);
11468 if (n < 0) {
11469 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11470 n);
11471 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11472 dev_close(bp->dev);
11473 goto fw_reset_exit;
11474 } else if (n > 0) {
11475 u16 vf_tmo_dsecs = n * 10;
11476
11477 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11478 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11479 bp->fw_reset_state =
11480 BNXT_FW_RESET_STATE_POLL_VF;
11481 bnxt_queue_fw_reset_work(bp, HZ / 10);
11482 goto fw_reset_exit;
230d1f0d
MC
11483 }
11484 bnxt_fw_reset_close(bp);
4037eb71
VV
11485 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11486 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11487 tmo = HZ / 10;
11488 } else {
11489 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11490 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11491 }
11492 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d
MC
11493 }
11494fw_reset_exit:
11495 bnxt_rtnl_unlock_sp(bp);
11496}
11497
ffd77621
MC
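/* On P5 chips, look for completion rings that have pending work but whose
 * consumer index has not moved since the last check, query the ring state
 * from firmware and count the event as a missed IRQ.
 */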
11498static void bnxt_chk_missed_irq(struct bnxt *bp)
11499{
11500 int i;
11501
11502 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11503 return;
11504
11505 for (i = 0; i < bp->cp_nr_rings; i++) {
11506 struct bnxt_napi *bnapi = bp->bnapi[i];
11507 struct bnxt_cp_ring_info *cpr;
11508 u32 fw_ring_id;
11509 int j;
11510
11511 if (!bnapi)
11512 continue;
11513
11514 cpr = &bnapi->cp_ring;
11515 for (j = 0; j < 2; j++) {
11516 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11517 u32 val[2];
11518
11519 if (!cpr2 || cpr2->has_more_work ||
11520 !bnxt_has_work(bp, cpr2))
11521 continue;
11522
11523 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11524 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11525 continue;
11526 }
11527 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11528 bnxt_dbg_hwrm_ring_info_get(bp,
11529 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11530 fw_ring_id, &val[0], &val[1]);
9d8b5f05 11531 cpr->sw_stats.cmn.missed_irqs++;
ffd77621
MC
11532 }
11533 }
11534}
11535
c0c050c5
MC
11536static void bnxt_cfg_ntp_filters(struct bnxt *);
11537
8119e49b
MC
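/* Initialize the requested link parameters (autoneg, speed, signal mode,
 * duplex, flow control) from the current PHY configuration.
 */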
11538static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11539{
11540 struct bnxt_link_info *link_info = &bp->link_info;
11541
11542 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11543 link_info->autoneg = BNXT_AUTONEG_SPEED;
11544 if (bp->hwrm_spec_code >= 0x10201) {
11545 if (link_info->auto_pause_setting &
11546 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11547 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11548 } else {
11549 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11550 }
11551 link_info->advertising = link_info->auto_link_speeds;
d058426e 11552 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
8119e49b
MC
11553 } else {
11554 link_info->req_link_speed = link_info->force_link_speed;
d058426e
EP
11555 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11556 if (link_info->force_pam4_link_speed) {
11557 link_info->req_link_speed =
11558 link_info->force_pam4_link_speed;
11559 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11560 }
8119e49b
MC
11561 link_info->req_duplex = link_info->duplex_setting;
11562 }
11563 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11564 link_info->req_flow_ctrl =
11565 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11566 else
11567 link_info->req_flow_ctrl = link_info->force_pause_setting;
11568}
11569
df97b34d
MC
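/* Answer a firmware echo request by sending the received data words back
 * in an HWRM_FUNC_ECHO_RESPONSE message.
 */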
11570static void bnxt_fw_echo_reply(struct bnxt *bp)
11571{
11572 struct bnxt_fw_health *fw_health = bp->fw_health;
11573 struct hwrm_func_echo_response_input req = {0};
11574
11575 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
11576 req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11577 req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11578 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11579}
11580
c0c050c5
MC
11581static void bnxt_sp_task(struct work_struct *work)
11582{
11583 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
c0c050c5 11584
4cebdcec
MC
11585 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11586 smp_mb__after_atomic();
11587 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11588 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5 11589 return;
4cebdcec 11590 }
c0c050c5
MC
11591
11592 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11593 bnxt_cfg_rx_mode(bp);
11594
11595 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11596 bnxt_cfg_ntp_filters(bp);
c0c050c5
MC
11597 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11598 bnxt_hwrm_exec_fwd_req(bp);
00db3cba 11599 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
531d1d26
MC
11600 bnxt_hwrm_port_qstats(bp, 0);
11601 bnxt_hwrm_port_qstats_ext(bp, 0);
fea6b333 11602 bnxt_accumulate_all_stats(bp);
00db3cba 11603 }
3bdf56c4 11604
0eaa24b9 11605 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
e2dc9b6e 11606 int rc;
0eaa24b9 11607
e2dc9b6e 11608 mutex_lock(&bp->link_lock);
0eaa24b9
MC
11609 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11610 &bp->sp_event))
11611 bnxt_hwrm_phy_qcaps(bp);
11612
e2dc9b6e 11613 rc = bnxt_update_link(bp, true);
0eaa24b9
MC
11614 if (rc)
11615 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11616 rc);
ca0c7538
VV
11617
11618 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11619 &bp->sp_event))
11620 bnxt_init_ethtool_link_settings(bp);
11621 mutex_unlock(&bp->link_lock);
0eaa24b9 11622 }
a1ef4a79
MC
11623 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11624 int rc;
11625
11626 mutex_lock(&bp->link_lock);
11627 rc = bnxt_update_phy_setting(bp);
11628 mutex_unlock(&bp->link_lock);
11629 if (rc) {
11630 netdev_warn(bp->dev, "update phy settings retry failed\n");
11631 } else {
11632 bp->link_info.phy_retry = false;
11633 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11634 }
11635 }
90c694bb 11636 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
e2dc9b6e
MC
11637 mutex_lock(&bp->link_lock);
11638 bnxt_get_port_module_status(bp);
11639 mutex_unlock(&bp->link_lock);
90c694bb 11640 }
5a84acbe
SP
11641
11642 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11643 bnxt_tc_flow_stats_work(bp);
11644
ffd77621
MC
11645 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11646 bnxt_chk_missed_irq(bp);
11647
df97b34d
MC
11648 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11649 bnxt_fw_echo_reply(bp);
11650
e2dc9b6e
MC
 11651 /* The functions below will clear BNXT_STATE_IN_SP_TASK. They
11652 * must be the last functions to be called before exiting.
11653 */
6988bd92
MC
11654 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11655 bnxt_reset(bp, false);
4cebdcec 11656
fc0f1929
MC
11657 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11658 bnxt_reset(bp, true);
11659
8fbf58e1
MC
11660 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11661 bnxt_rx_ring_reset(bp);
11662
657a33c8
VV
11663 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11664 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11665
acfb50e4
VV
11666 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11667 if (!is_bnxt_fw_ok(bp))
11668 bnxt_devlink_health_report(bp,
11669 BNXT_FW_EXCEPTION_SP_EVENT);
11670 }
11671
4cebdcec
MC
11672 smp_mb__before_atomic();
11673 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5
MC
11674}
11675
d1e7925e 11676/* Under rtnl_lock */
98fdbe73
MC
11677int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11678 int tx_xdp)
d1e7925e
MC
11679{
11680 int max_rx, max_tx, tx_sets = 1;
780baad4 11681 int tx_rings_needed, stats;
8f23d638 11682 int rx_rings = rx;
6fc2ffdf 11683 int cp, vnics, rc;
d1e7925e 11684
d1e7925e
MC
11685 if (tcs)
11686 tx_sets = tcs;
11687
11688 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11689 if (rc)
11690 return rc;
11691
11692 if (max_rx < rx)
11693 return -ENOMEM;
11694
5f449249 11695 tx_rings_needed = tx * tx_sets + tx_xdp;
d1e7925e
MC
11696 if (max_tx < tx_rings_needed)
11697 return -ENOMEM;
11698
6fc2ffdf 11699 vnics = 1;
9b3d15e6 11700 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
6fc2ffdf
EW
11701 vnics += rx_rings;
11702
8f23d638
MC
11703 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11704 rx_rings <<= 1;
11705 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
780baad4
VV
11706 stats = cp;
11707 if (BNXT_NEW_RM(bp)) {
11c3ec7b 11708 cp += bnxt_get_ulp_msix_num(bp);
780baad4
VV
11709 stats += bnxt_get_ulp_stat_ctxs(bp);
11710 }
6fc2ffdf 11711 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
780baad4 11712 stats, vnics);
d1e7925e
MC
11713}
11714
17086399
SP
11715static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11716{
11717 if (bp->bar2) {
11718 pci_iounmap(pdev, bp->bar2);
11719 bp->bar2 = NULL;
11720 }
11721
11722 if (bp->bar1) {
11723 pci_iounmap(pdev, bp->bar1);
11724 bp->bar1 = NULL;
11725 }
11726
11727 if (bp->bar0) {
11728 pci_iounmap(pdev, bp->bar0);
11729 bp->bar0 = NULL;
11730 }
11731}
11732
11733static void bnxt_cleanup_pci(struct bnxt *bp)
11734{
11735 bnxt_unmap_bars(bp, bp->pdev);
11736 pci_release_regions(bp->pdev);
f6824308
VV
11737 if (pci_is_enabled(bp->pdev))
11738 pci_disable_device(bp->pdev);
17086399
SP
11739}
11740
18775aa8
MC
11741static void bnxt_init_dflt_coal(struct bnxt *bp)
11742{
11743 struct bnxt_coal *coal;
11744
 11745 /* Tick values in microseconds.
11746 * 1 coal_buf x bufs_per_record = 1 completion record.
11747 */
11748 coal = &bp->rx_coal;
0c2ff8d7 11749 coal->coal_ticks = 10;
18775aa8
MC
11750 coal->coal_bufs = 30;
11751 coal->coal_ticks_irq = 1;
11752 coal->coal_bufs_irq = 2;
05abe4dd 11753 coal->idle_thresh = 50;
18775aa8
MC
11754 coal->bufs_per_record = 2;
11755 coal->budget = 64; /* NAPI budget */
11756
11757 coal = &bp->tx_coal;
11758 coal->coal_ticks = 28;
11759 coal->coal_bufs = 30;
11760 coal->coal_ticks_irq = 2;
11761 coal->coal_bufs_irq = 2;
11762 coal->bufs_per_record = 1;
11763
11764 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11765}
11766
7c380918
MC
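/* Phase 1 of firmware init: query the firmware version (attempting
 * recovery if the query fails), set up the KONG channel and short command
 * buffers as needed, read the NVM config version and reset the function.
 */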
11767static int bnxt_fw_init_one_p1(struct bnxt *bp)
11768{
11769 int rc;
11770
11771 bp->fw_cap = 0;
11772 rc = bnxt_hwrm_ver_get(bp);
ba02629f
EP
11773 bnxt_try_map_fw_health_reg(bp);
11774 if (rc) {
b187e4ba
EP
11775 rc = bnxt_try_recover_fw(bp);
11776 if (rc)
11777 return rc;
11778 rc = bnxt_hwrm_ver_get(bp);
87f7ab8d
EP
11779 if (rc)
11780 return rc;
ba02629f 11781 }
7c380918
MC
11782
11783 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
11784 rc = bnxt_alloc_kong_hwrm_resources(bp);
11785 if (rc)
11786 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
11787 }
11788
11789 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
11790 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
11791 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
11792 if (rc)
11793 return rc;
11794 }
4933f675
VV
11795 bnxt_nvm_cfg_ver_get(bp);
11796
7c380918
MC
11797 rc = bnxt_hwrm_func_reset(bp);
11798 if (rc)
11799 return -ENODEV;
11800
11801 bnxt_hwrm_fw_set_time(bp);
11802 return 0;
11803}
11804
11805static int bnxt_fw_init_one_p2(struct bnxt *bp)
11806{
11807 int rc;
11808
11809 /* Get the MAX capabilities for this function */
11810 rc = bnxt_hwrm_func_qcaps(bp);
11811 if (rc) {
11812 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11813 rc);
11814 return -ENODEV;
11815 }
11816
11817 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11818 if (rc)
11819 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11820 rc);
11821
3e9ec2bb
EP
11822 if (bnxt_alloc_fw_health(bp)) {
11823 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11824 } else {
11825 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11826 if (rc)
11827 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11828 rc);
11829 }
07f83d72 11830
2e882468 11831 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
7c380918
MC
11832 if (rc)
11833 return -ENODEV;
11834
11835 bnxt_hwrm_func_qcfg(bp);
11836 bnxt_hwrm_vnic_qcaps(bp);
11837 bnxt_hwrm_port_led_qcaps(bp);
11838 bnxt_ethtool_init(bp);
11839 bnxt_dcb_init(bp);
11840 return 0;
11841}
11842
ba642ab7
MC
11843static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11844{
11845 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11846 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11847 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11848 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11849 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
c66c06c5 11850 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
ba642ab7
MC
11851 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11852 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11853 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11854 }
11855}
11856
11857static void bnxt_set_dflt_rfs(struct bnxt *bp)
11858{
11859 struct net_device *dev = bp->dev;
11860
11861 dev->hw_features &= ~NETIF_F_NTUPLE;
11862 dev->features &= ~NETIF_F_NTUPLE;
11863 bp->flags &= ~BNXT_FLAG_RFS;
11864 if (bnxt_rfs_supported(bp)) {
11865 dev->hw_features |= NETIF_F_NTUPLE;
11866 if (bnxt_rfs_capable(bp)) {
11867 bp->flags |= BNXT_FLAG_RFS;
11868 dev->features |= NETIF_F_NTUPLE;
11869 }
11870 }
11871}
11872
11873static void bnxt_fw_init_one_p3(struct bnxt *bp)
11874{
11875 struct pci_dev *pdev = bp->pdev;
11876
11877 bnxt_set_dflt_rss_hash_type(bp);
11878 bnxt_set_dflt_rfs(bp);
11879
11880 bnxt_get_wol_settings(bp);
11881 if (bp->flags & BNXT_FLAG_WOL_CAP)
11882 device_set_wakeup_enable(&pdev->dev, bp->wol);
11883 else
11884 device_set_wakeup_capable(&pdev->dev, false);
11885
11886 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11887 bnxt_hwrm_coal_params_qcaps(bp);
11888}
11889
0afd6a4e
MC
11890static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11891
ec5d31e3
MC
11892static int bnxt_fw_init_one(struct bnxt *bp)
11893{
11894 int rc;
11895
11896 rc = bnxt_fw_init_one_p1(bp);
11897 if (rc) {
11898 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11899 return rc;
11900 }
11901 rc = bnxt_fw_init_one_p2(bp);
11902 if (rc) {
11903 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11904 return rc;
11905 }
0afd6a4e
MC
11906 rc = bnxt_probe_phy(bp, false);
11907 if (rc)
11908 return rc;
ec5d31e3
MC
11909 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11910 if (rc)
11911 return rc;
937f188c
VV
11912
11913 /* In case fw capabilities have changed, destroy the unneeded
11914 * reporters and create newly capable ones.
11915 */
11916 bnxt_dl_fw_reporters_destroy(bp, false);
11917 bnxt_dl_fw_reporters_create(bp);
ec5d31e3
MC
11918 bnxt_fw_init_one_p3(bp);
11919 return 0;
11920}
11921
cbb51067
MC
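/* Execute one step of the firmware-supplied reset sequence: write the
 * value to the register described by its type (config space, GRC window
 * or BAR) and apply the optional post-write delay.
 */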
11922static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11923{
11924 struct bnxt_fw_health *fw_health = bp->fw_health;
11925 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11926 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11927 u32 reg_type, reg_off, delay_msecs;
11928
11929 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11930 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11931 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11932 switch (reg_type) {
11933 case BNXT_FW_HEALTH_REG_TYPE_CFG:
11934 pci_write_config_dword(bp->pdev, reg_off, val);
11935 break;
11936 case BNXT_FW_HEALTH_REG_TYPE_GRC:
11937 writel(reg_off & BNXT_GRC_BASE_MASK,
11938 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11939 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
df561f66 11940 fallthrough;
cbb51067
MC
11941 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
11942 writel(val, bp->bar0 + reg_off);
11943 break;
11944 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
11945 writel(val, bp->bar1 + reg_off);
11946 break;
11947 }
11948 if (delay_msecs) {
11949 pci_read_config_dword(bp->pdev, 0, &val);
11950 msleep(delay_msecs);
11951 }
11952}
11953
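/* Reset the firmware using whichever mechanism the device advertises:
 * an OP-TEE assisted reset when BNXT_FW_CAP_ERR_RECOVER_RELOAD is set,
 * the host register write sequence above, or a graceful HWRM_FW_RESET
 * request handled by the co-processor.
 */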
11954static void bnxt_reset_all(struct bnxt *bp)
11955{
11956 struct bnxt_fw_health *fw_health = bp->fw_health;
e07ab202
VV
11957 int i, rc;
11958
11959 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
87f7ab8d 11960 bnxt_fw_reset_via_optee(bp);
e07ab202 11961 bp->fw_reset_timestamp = jiffies;
e07ab202
VV
11962 return;
11963 }
cbb51067
MC
11964
11965 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
11966 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
11967 bnxt_fw_reset_writel(bp, i);
11968 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
11969 struct hwrm_fw_reset_input req = {0};
cbb51067
MC
11970
11971 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
11972 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
11973 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
11974 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
11975 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
11976 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
a2f3835c 11977 if (rc != -ENODEV)
cbb51067
MC
11978 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
11979 }
11980 bp->fw_reset_timestamp = jiffies;
11981}
11982
339eeb4b
MC
11983static bool bnxt_fw_reset_timeout(struct bnxt *bp)
11984{
11985 return time_after(jiffies, bp->fw_reset_timestamp +
11986 (bp->fw_reset_max_dsecs * HZ / 10));
11987}
11988
3958b1da
SK
11989static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
11990{
11991 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11992 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
11993 bnxt_ulp_start(bp, rc);
11994 bnxt_dl_health_status_update(bp, false);
11995 }
11996 bp->fw_reset_state = 0;
11997 dev_close(bp->dev);
11998}
11999
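/* Firmware reset state machine, driven by the fw_reset_task delayed
 * work.  The usual progression is:
 *   POLL_VF      - wait for all VFs to unregister, then close the nic
 *   POLL_FW_DOWN - (ERR_RECOVER_RELOAD only) wait for fw to shut down
 *   RESET_FW     - the master function performs the actual reset
 *   ENABLE_DEV   - re-enable the PCI device and bus mastering
 *   POLL_FW      - poll HWRM_VER_GET until firmware responds again
 *   OPENING      - reopen the netdev and restart ULP / VF reps
 */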
230d1f0d
MC
12000static void bnxt_fw_reset_task(struct work_struct *work)
12001{
12002 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
3958b1da 12003 int rc = 0;
230d1f0d
MC
12004
12005 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12006 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12007 return;
12008 }
12009
12010 switch (bp->fw_reset_state) {
e72cb7d6
MC
12011 case BNXT_FW_RESET_STATE_POLL_VF: {
12012 int n = bnxt_get_registered_vfs(bp);
4037eb71 12013 int tmo;
e72cb7d6
MC
12014
12015 if (n < 0) {
230d1f0d 12016 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
e72cb7d6 12017 n, jiffies_to_msecs(jiffies -
230d1f0d
MC
12018 bp->fw_reset_timestamp));
12019 goto fw_reset_abort;
e72cb7d6 12020 } else if (n > 0) {
339eeb4b 12021 if (bnxt_fw_reset_timeout(bp)) {
230d1f0d
MC
12022 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12023 bp->fw_reset_state = 0;
e72cb7d6
MC
12024 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12025 n);
230d1f0d
MC
12026 return;
12027 }
12028 bnxt_queue_fw_reset_work(bp, HZ / 10);
12029 return;
12030 }
12031 bp->fw_reset_timestamp = jiffies;
12032 rtnl_lock();
6cd657cb 12033 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
3958b1da 12034 bnxt_fw_reset_abort(bp, rc);
6cd657cb 12035 rtnl_unlock();
3958b1da 12036 return;
6cd657cb 12037 }
230d1f0d 12038 bnxt_fw_reset_close(bp);
4037eb71
VV
12039 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12040 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12041 tmo = HZ / 10;
12042 } else {
12043 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12044 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12045 }
230d1f0d 12046 rtnl_unlock();
4037eb71 12047 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d 12048 return;
e72cb7d6 12049 }
4037eb71
VV
12050 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12051 u32 val;
12052
12053 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12054 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
339eeb4b 12055 !bnxt_fw_reset_timeout(bp)) {
4037eb71
VV
12056 bnxt_queue_fw_reset_work(bp, HZ / 5);
12057 return;
12058 }
12059
12060 if (!bp->fw_health->master) {
12061 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12062
12063 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12064 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12065 return;
12066 }
12067 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12068 }
df561f66 12069 fallthrough;
c6a9e7aa 12070 case BNXT_FW_RESET_STATE_RESET_FW:
cbb51067
MC
12071 bnxt_reset_all(bp);
12072 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
c6a9e7aa 12073 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
cbb51067 12074 return;
230d1f0d 12075 case BNXT_FW_RESET_STATE_ENABLE_DEV:
43a440c4 12076 bnxt_inv_fw_health_reg(bp);
bae8a003
VV
12077 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12078 !bp->fw_reset_min_dsecs) {
12079 u16 val;
12080
12081 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12082 if (val == 0xffff) {
12083 if (bnxt_fw_reset_timeout(bp)) {
12084 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
3958b1da 12085 rc = -ETIMEDOUT;
bae8a003 12086 goto fw_reset_abort;
dab62e7c 12087 }
bae8a003
VV
12088 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12089 return;
dab62e7c 12090 }
d1db9e16 12091 }
b4fff207 12092 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
230d1f0d
MC
12093 if (pci_enable_device(bp->pdev)) {
12094 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
3958b1da 12095 rc = -ENODEV;
230d1f0d
MC
12096 goto fw_reset_abort;
12097 }
12098 pci_set_master(bp->pdev);
12099 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
df561f66 12100 fallthrough;
230d1f0d
MC
12101 case BNXT_FW_RESET_STATE_POLL_FW:
12102 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12103 rc = __bnxt_hwrm_ver_get(bp, true);
12104 if (rc) {
339eeb4b 12105 if (bnxt_fw_reset_timeout(bp)) {
230d1f0d 12106 netdev_err(bp->dev, "Firmware reset aborted\n");
fc8864e0 12107 goto fw_reset_abort_status;
230d1f0d
MC
12108 }
12109 bnxt_queue_fw_reset_work(bp, HZ / 5);
12110 return;
12111 }
12112 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12113 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
df561f66 12114 fallthrough;
230d1f0d
MC
12115 case BNXT_FW_RESET_STATE_OPENING:
12116 while (!rtnl_trylock()) {
12117 bnxt_queue_fw_reset_work(bp, HZ / 10);
12118 return;
12119 }
12120 rc = bnxt_open(bp->dev);
12121 if (rc) {
3958b1da
SK
12122 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12123 bnxt_fw_reset_abort(bp, rc);
12124 rtnl_unlock();
12125 return;
230d1f0d 12126 }
230d1f0d
MC
12127
12128 bp->fw_reset_state = 0;
12129 /* Make sure fw_reset_state is 0 before clearing the flag */
12130 smp_mb__before_atomic();
12131 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
f3a6d206 12132 bnxt_ulp_start(bp, rc);
12de2ead
MC
12133 if (!rc)
12134 bnxt_reenable_sriov(bp);
ac797ced
SB
12135 bnxt_vf_reps_alloc(bp);
12136 bnxt_vf_reps_open(bp);
737d7a6c 12137 bnxt_dl_health_recovery_done(bp);
e4e38237 12138 bnxt_dl_health_status_update(bp, true);
f3a6d206 12139 rtnl_unlock();
230d1f0d
MC
12140 break;
12141 }
12142 return;
12143
fc8864e0
MC
12144fw_reset_abort_status:
12145 if (bp->fw_health->status_reliable ||
12146 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12147 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12148
12149 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12150 }
230d1f0d 12151fw_reset_abort:
230d1f0d 12152 rtnl_lock();
3958b1da 12153 bnxt_fw_reset_abort(bp, rc);
230d1f0d
MC
12154 rtnl_unlock();
12155}
12156
c0c050c5
MC
12157static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12158{
12159 int rc;
12160 struct bnxt *bp = netdev_priv(dev);
12161
12162 SET_NETDEV_DEV(dev, &pdev->dev);
12163
12164 /* enable device (incl. PCI PM wakeup), and bus-mastering */
12165 rc = pci_enable_device(pdev);
12166 if (rc) {
12167 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12168 goto init_err;
12169 }
12170
12171 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12172 dev_err(&pdev->dev,
12173 "Cannot find PCI device base address, aborting\n");
12174 rc = -ENODEV;
12175 goto init_err_disable;
12176 }
12177
12178 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12179 if (rc) {
12180 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12181 goto init_err_disable;
12182 }
12183
12184 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12185 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12186 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
3383176e 12187 rc = -EIO;
c54bc3ce 12188 goto init_err_release;
c0c050c5
MC
12189 }
12190
12191 pci_set_master(pdev);
12192
12193 bp->dev = dev;
12194 bp->pdev = pdev;
12195
8ae24738
MC
12196 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12197 * determines the BAR size.
12198 */
c0c050c5
MC
12199 bp->bar0 = pci_ioremap_bar(pdev, 0);
12200 if (!bp->bar0) {
12201 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12202 rc = -ENOMEM;
12203 goto init_err_release;
12204 }
12205
c0c050c5
MC
12206 bp->bar2 = pci_ioremap_bar(pdev, 4);
12207 if (!bp->bar2) {
12208 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12209 rc = -ENOMEM;
12210 goto init_err_release;
12211 }
12212
6316ea6d
SB
12213 pci_enable_pcie_error_reporting(pdev);
12214
c0c050c5 12215 INIT_WORK(&bp->sp_task, bnxt_sp_task);
230d1f0d 12216 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
c0c050c5
MC
12217
12218 spin_lock_init(&bp->ntp_fltr_lock);
697197e5
MC
12219#if BITS_PER_LONG == 32
12220 spin_lock_init(&bp->db_lock);
12221#endif
c0c050c5
MC
12222
12223 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12224 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12225
18775aa8 12226 bnxt_init_dflt_coal(bp);
51f30785 12227
e99e88a9 12228 timer_setup(&bp->timer, bnxt_timer, 0);
c0c050c5
MC
12229 bp->current_interval = BNXT_TIMER_INTERVAL;
12230
442a35a5
JK
12231 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12232 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12233
caefe526 12234 clear_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
12235 return 0;
12236
12237init_err_release:
17086399 12238 bnxt_unmap_bars(bp, pdev);
c0c050c5
MC
12239 pci_release_regions(pdev);
12240
12241init_err_disable:
12242 pci_disable_device(pdev);
12243
12244init_err:
12245 return rc;
12246}
12247
12248/* rtnl_lock held */
12249static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12250{
12251 struct sockaddr *addr = p;
1fc2cfd0
JH
12252 struct bnxt *bp = netdev_priv(dev);
12253 int rc = 0;
c0c050c5
MC
12254
12255 if (!is_valid_ether_addr(addr->sa_data))
12256 return -EADDRNOTAVAIL;
12257
c1a7bdff
MC
12258 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12259 return 0;
12260
28ea334b 12261 rc = bnxt_approve_mac(bp, addr->sa_data, true);
84c33dd3
MC
12262 if (rc)
12263 return rc;
bdd4347b 12264
c0c050c5 12265 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1fc2cfd0
JH
12266 if (netif_running(dev)) {
12267 bnxt_close_nic(bp, false, false);
12268 rc = bnxt_open_nic(bp, false, false);
12269 }
c0c050c5 12270
1fc2cfd0 12271 return rc;
c0c050c5
MC
12272}
12273
12274/* rtnl_lock held */
12275static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12276{
12277 struct bnxt *bp = netdev_priv(dev);
12278
c0c050c5 12279 if (netif_running(dev))
a9b952d2 12280 bnxt_close_nic(bp, true, false);
c0c050c5
MC
12281
12282 dev->mtu = new_mtu;
12283 bnxt_set_ring_params(bp);
12284
12285 if (netif_running(dev))
a9b952d2 12286 return bnxt_open_nic(bp, true, false);
c0c050c5
MC
12287
12288 return 0;
12289}
12290
c5e3deb8 12291int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
c0c050c5
MC
12292{
12293 struct bnxt *bp = netdev_priv(dev);
3ffb6a39 12294 bool sh = false;
d1e7925e 12295 int rc;
16e5cc64 12296
c0c050c5 12297 if (tc > bp->max_tc) {
b451c8b6 12298 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
c0c050c5
MC
12299 tc, bp->max_tc);
12300 return -EINVAL;
12301 }
12302
12303 if (netdev_get_num_tc(dev) == tc)
12304 return 0;
12305
3ffb6a39
MC
12306 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12307 sh = true;
12308
98fdbe73
MC
12309 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12310 sh, tc, bp->tx_nr_rings_xdp);
d1e7925e
MC
12311 if (rc)
12312 return rc;
c0c050c5
MC
12313
12314 /* Need to close the device and re-allocate hw resources */
12315 if (netif_running(bp->dev))
12316 bnxt_close_nic(bp, true, false);
12317
12318 if (tc) {
12319 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12320 netdev_set_num_tc(dev, tc);
12321 } else {
12322 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12323 netdev_reset_tc(dev);
12324 }
87e9b377 12325 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
3ffb6a39
MC
12326 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12327 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5
MC
12328
12329 if (netif_running(bp->dev))
12330 return bnxt_open_nic(bp, true, false);
12331
12332 return 0;
12333}
12334
9e0fd15d
JP
12335static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12336 void *cb_priv)
c5e3deb8 12337{
9e0fd15d 12338 struct bnxt *bp = cb_priv;
de4784ca 12339
312324f1
JK
12340 if (!bnxt_tc_flower_enabled(bp) ||
12341 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
38cf0426 12342 return -EOPNOTSUPP;
c5e3deb8 12343
9e0fd15d
JP
12344 switch (type) {
12345 case TC_SETUP_CLSFLOWER:
12346 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12347 default:
12348 return -EOPNOTSUPP;
12349 }
12350}
12351
627c89d0 12352LIST_HEAD(bnxt_block_cb_list);
955bcb6e 12353
2ae7408f
SP
12354static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12355 void *type_data)
12356{
4e95bc26
PNA
12357 struct bnxt *bp = netdev_priv(dev);
12358
2ae7408f 12359 switch (type) {
9e0fd15d 12360 case TC_SETUP_BLOCK:
955bcb6e
PNA
12361 return flow_block_cb_setup_simple(type_data,
12362 &bnxt_block_cb_list,
4e95bc26
PNA
12363 bnxt_setup_tc_block_cb,
12364 bp, bp, true);
575ed7d3 12365 case TC_SETUP_QDISC_MQPRIO: {
2ae7408f
SP
12366 struct tc_mqprio_qopt *mqprio = type_data;
12367
12368 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
56f36acd 12369
2ae7408f
SP
12370 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12371 }
12372 default:
12373 return -EOPNOTSUPP;
12374 }
c5e3deb8
MC
12375}
12376
c0c050c5
MC
12377#ifdef CONFIG_RFS_ACCEL
12378static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12379 struct bnxt_ntuple_filter *f2)
12380{
12381 struct flow_keys *keys1 = &f1->fkeys;
12382 struct flow_keys *keys2 = &f2->fkeys;
12383
6fc7caa8
MC
12384 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12385 keys1->basic.ip_proto != keys2->basic.ip_proto)
12386 return false;
12387
12388 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12389 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12390 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12391 return false;
12392 } else {
12393 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12394 sizeof(keys1->addrs.v6addrs.src)) ||
12395 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12396 sizeof(keys1->addrs.v6addrs.dst)))
12397 return false;
12398 }
12399
12400 if (keys1->ports.ports == keys2->ports.ports &&
61aad724 12401 keys1->control.flags == keys2->control.flags &&
a54c4d74
MC
12402 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12403 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
c0c050c5
MC
12404 return true;
12405
12406 return false;
12407}
12408
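/* ndo_rx_flow_steer handler for accelerated RFS.  The flow keys are
 * dissected from the skb and checked against the supported combinations
 * (TCP/UDP over IPv4, plus IPv6 on newer firmware).  If an equivalent
 * filter already exists the request is dropped; otherwise a sw_id is
 * allocated from the ntuple filter bitmap, the filter is added to the
 * hash table and sp_task is scheduled to program it through firmware.
 */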
12409static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12410 u16 rxq_index, u32 flow_id)
12411{
12412 struct bnxt *bp = netdev_priv(dev);
12413 struct bnxt_ntuple_filter *fltr, *new_fltr;
12414 struct flow_keys *fkeys;
12415 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
a54c4d74 12416 int rc = 0, idx, bit_id, l2_idx = 0;
c0c050c5 12417 struct hlist_head *head;
f47d0e19 12418 u32 flags;
c0c050c5 12419
a54c4d74
MC
12420 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12421 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12422 int off = 0, j;
12423
12424 netif_addr_lock_bh(dev);
12425 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12426 if (ether_addr_equal(eth->h_dest,
12427 vnic->uc_list + off)) {
12428 l2_idx = j + 1;
12429 break;
12430 }
12431 }
12432 netif_addr_unlock_bh(dev);
12433 if (!l2_idx)
12434 return -EINVAL;
12435 }
c0c050c5
MC
12436 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12437 if (!new_fltr)
12438 return -ENOMEM;
12439
12440 fkeys = &new_fltr->fkeys;
12441 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12442 rc = -EPROTONOSUPPORT;
12443 goto err_free;
12444 }
12445
dda0e746
MC
12446 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12447 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
c0c050c5
MC
12448 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12449 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12450 rc = -EPROTONOSUPPORT;
12451 goto err_free;
12452 }
dda0e746
MC
12453 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12454 bp->hwrm_spec_code < 0x10601) {
12455 rc = -EPROTONOSUPPORT;
12456 goto err_free;
12457 }
f47d0e19
MC
12458 flags = fkeys->control.flags;
12459 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12460 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
61aad724
MC
12461 rc = -EPROTONOSUPPORT;
12462 goto err_free;
12463 }
c0c050c5 12464
a54c4d74 12465 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
c0c050c5
MC
12466 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12467
12468 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12469 head = &bp->ntp_fltr_hash_tbl[idx];
12470 rcu_read_lock();
12471 hlist_for_each_entry_rcu(fltr, head, hash) {
12472 if (bnxt_fltr_match(fltr, new_fltr)) {
12473 rcu_read_unlock();
12474 rc = 0;
12475 goto err_free;
12476 }
12477 }
12478 rcu_read_unlock();
12479
12480 spin_lock_bh(&bp->ntp_fltr_lock);
84e86b98
MC
12481 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12482 BNXT_NTP_FLTR_MAX_FLTR, 0);
12483 if (bit_id < 0) {
c0c050c5
MC
12484 spin_unlock_bh(&bp->ntp_fltr_lock);
12485 rc = -ENOMEM;
12486 goto err_free;
12487 }
12488
84e86b98 12489 new_fltr->sw_id = (u16)bit_id;
c0c050c5 12490 new_fltr->flow_id = flow_id;
a54c4d74 12491 new_fltr->l2_fltr_idx = l2_idx;
c0c050c5
MC
12492 new_fltr->rxq = rxq_index;
12493 hlist_add_head_rcu(&new_fltr->hash, head);
12494 bp->ntp_fltr_count++;
12495 spin_unlock_bh(&bp->ntp_fltr_lock);
12496
12497 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
c213eae8 12498 bnxt_queue_sp_work(bp);
c0c050c5
MC
12499
12500 return new_fltr->sw_id;
12501
12502err_free:
12503 kfree(new_fltr);
12504 return rc;
12505}
12506
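/* Invoked from sp_task to service BNXT_RX_NTP_FLTR_SP_EVENT: walk the
 * ntuple filter hash table, program filters not yet configured in
 * hardware, and free filters that rps_may_expire_flow() reports as idle
 * or that failed to be programmed.
 */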
12507static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12508{
12509 int i;
12510
12511 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12512 struct hlist_head *head;
12513 struct hlist_node *tmp;
12514 struct bnxt_ntuple_filter *fltr;
12515 int rc;
12516
12517 head = &bp->ntp_fltr_hash_tbl[i];
12518 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12519 bool del = false;
12520
12521 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12522 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12523 fltr->flow_id,
12524 fltr->sw_id)) {
12525 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12526 fltr);
12527 del = true;
12528 }
12529 } else {
12530 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12531 fltr);
12532 if (rc)
12533 del = true;
12534 else
12535 set_bit(BNXT_FLTR_VALID, &fltr->state);
12536 }
12537
12538 if (del) {
12539 spin_lock_bh(&bp->ntp_fltr_lock);
12540 hlist_del_rcu(&fltr->hash);
12541 bp->ntp_fltr_count--;
12542 spin_unlock_bh(&bp->ntp_fltr_lock);
12543 synchronize_rcu();
12544 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12545 kfree(fltr);
12546 }
12547 }
12548 }
19241368 12549 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
9a005c38 12550 netdev_info(bp->dev, "Received PF driver unload event!\n");
c0c050c5
MC
12551}
12552
12553#else
12554
12555static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12556{
12557}
12558
12559#endif /* CONFIG_RFS_ACCEL */
12560
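/* udp_tunnel_nic sync callback.  One VXLAN and one GENEVE destination
 * port are supported (see bnxt_udp_tunnels below); a non-zero port is
 * allocated with firmware, a zero port frees the previous entry.
 */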
442a35a5 12561static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
c0c050c5 12562{
442a35a5
JK
12563 struct bnxt *bp = netdev_priv(netdev);
12564 struct udp_tunnel_info ti;
12565 unsigned int cmd;
c0c050c5 12566
442a35a5 12567 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
1698d600
MC
12568 if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
12569 bp->vxlan_port = ti.port;
442a35a5 12570 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
1698d600
MC
12571 } else {
12572 bp->nge_port = ti.port;
442a35a5 12573 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
1698d600 12574 }
7cdd5fc3 12575
442a35a5
JK
12576 if (ti.port)
12577 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
ad51b8e9 12578
442a35a5 12579 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
c0c050c5
MC
12580}
12581
442a35a5
JK
12582static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12583 .sync_table = bnxt_udp_tunnel_sync,
12584 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12585 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12586 .tables = {
12587 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
12588 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12589 },
12590};
c0c050c5 12591
39d8ba2e
MC
12592static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12593 struct net_device *dev, u32 filter_mask,
12594 int nlflags)
12595{
12596 struct bnxt *bp = netdev_priv(dev);
12597
12598 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12599 nlflags, filter_mask, NULL);
12600}
12601
12602static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
2fd527b7 12603 u16 flags, struct netlink_ext_ack *extack)
39d8ba2e
MC
12604{
12605 struct bnxt *bp = netdev_priv(dev);
12606 struct nlattr *attr, *br_spec;
12607 int rem, rc = 0;
12608
12609 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12610 return -EOPNOTSUPP;
12611
12612 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12613 if (!br_spec)
12614 return -EINVAL;
12615
12616 nla_for_each_nested(attr, br_spec, rem) {
12617 u16 mode;
12618
12619 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12620 continue;
12621
12622 if (nla_len(attr) < sizeof(mode))
12623 return -EINVAL;
12624
12625 mode = nla_get_u16(attr);
12626 if (mode == bp->br_mode)
12627 break;
12628
12629 rc = bnxt_hwrm_set_br_mode(bp, mode);
12630 if (!rc)
12631 bp->br_mode = mode;
12632 break;
12633 }
12634 return rc;
12635}
12636
52d5254a
FF
12637int bnxt_get_port_parent_id(struct net_device *dev,
12638 struct netdev_phys_item_id *ppid)
c124a62f 12639{
52d5254a
FF
12640 struct bnxt *bp = netdev_priv(dev);
12641
c124a62f
SP
12642 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12643 return -EOPNOTSUPP;
12644
12645 /* The PF and its VF-reps only support the switchdev framework */
d061b241 12646 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
c124a62f
SP
12647 return -EOPNOTSUPP;
12648
b014232f
VV
12649 ppid->id_len = sizeof(bp->dsn);
12650 memcpy(ppid->id, bp->dsn, ppid->id_len);
c124a62f 12651
52d5254a 12652 return 0;
c124a62f
SP
12653}
12654
c9c49a65
JP
12655static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12656{
12657 struct bnxt *bp = netdev_priv(dev);
12658
12659 return &bp->dl_port;
12660}
12661
c0c050c5
MC
12662static const struct net_device_ops bnxt_netdev_ops = {
12663 .ndo_open = bnxt_open,
12664 .ndo_start_xmit = bnxt_start_xmit,
12665 .ndo_stop = bnxt_close,
12666 .ndo_get_stats64 = bnxt_get_stats64,
12667 .ndo_set_rx_mode = bnxt_set_rx_mode,
a7605370 12668 .ndo_eth_ioctl = bnxt_ioctl,
c0c050c5
MC
12669 .ndo_validate_addr = eth_validate_addr,
12670 .ndo_set_mac_address = bnxt_change_mac_addr,
12671 .ndo_change_mtu = bnxt_change_mtu,
12672 .ndo_fix_features = bnxt_fix_features,
12673 .ndo_set_features = bnxt_set_features,
1698d600 12674 .ndo_features_check = bnxt_features_check,
c0c050c5
MC
12675 .ndo_tx_timeout = bnxt_tx_timeout,
12676#ifdef CONFIG_BNXT_SRIOV
12677 .ndo_get_vf_config = bnxt_get_vf_config,
12678 .ndo_set_vf_mac = bnxt_set_vf_mac,
12679 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
12680 .ndo_set_vf_rate = bnxt_set_vf_bw,
12681 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
12682 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
746df139 12683 .ndo_set_vf_trust = bnxt_set_vf_trust,
c0c050c5
MC
12684#endif
12685 .ndo_setup_tc = bnxt_setup_tc,
12686#ifdef CONFIG_RFS_ACCEL
12687 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
12688#endif
f4e63525 12689 .ndo_bpf = bnxt_xdp,
f18c2b77 12690 .ndo_xdp_xmit = bnxt_xdp_xmit,
39d8ba2e
MC
12691 .ndo_bridge_getlink = bnxt_bridge_getlink,
12692 .ndo_bridge_setlink = bnxt_bridge_setlink,
c9c49a65 12693 .ndo_get_devlink_port = bnxt_get_devlink_port,
c0c050c5
MC
12694};
12695
12696static void bnxt_remove_one(struct pci_dev *pdev)
12697{
12698 struct net_device *dev = pci_get_drvdata(pdev);
12699 struct bnxt *bp = netdev_priv(dev);
12700
7e334fc8 12701 if (BNXT_PF(bp))
c0c050c5
MC
12702 bnxt_sriov_disable(bp);
12703
21d6a11e
VV
12704 if (BNXT_PF(bp))
12705 devlink_port_type_clear(&bp->dl_port);
93cb62d9 12706
a521c8a0 12707 bnxt_ptp_clear(bp);
21d6a11e
VV
12708 pci_disable_pcie_error_reporting(pdev);
12709 unregister_netdev(dev);
b16939b5 12710 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
21d6a11e 12711 /* Flush any pending tasks */
631ce27a
VV
12712 cancel_work_sync(&bp->sp_task);
12713 cancel_delayed_work_sync(&bp->fw_reset_task);
b16939b5
VV
12714 bp->sp_event = 0;
12715
7e334fc8 12716 bnxt_dl_fw_reporters_destroy(bp, true);
cda2cab0 12717 bnxt_dl_unregister(bp);
2ae7408f 12718 bnxt_shutdown_tc(bp);
c0c050c5 12719
7809592d 12720 bnxt_clear_int_mode(bp);
be58a0da 12721 bnxt_hwrm_func_drv_unrgtr(bp);
c0c050c5 12722 bnxt_free_hwrm_resources(bp);
e605db80 12723 bnxt_free_hwrm_short_cmd_req(bp);
eb513658 12724 bnxt_ethtool_free(bp);
7df4ae9f 12725 bnxt_dcb_free(bp);
a588e458
MC
12726 kfree(bp->edev);
12727 bp->edev = NULL;
ae5c42f0
MC
12728 kfree(bp->ptp_cfg);
12729 bp->ptp_cfg = NULL;
8280b38e
VV
12730 kfree(bp->fw_health);
12731 bp->fw_health = NULL;
c20dc142 12732 bnxt_cleanup_pci(bp);
98f04cf0
MC
12733 bnxt_free_ctx_mem(bp);
12734 kfree(bp->ctx);
12735 bp->ctx = NULL;
1667cbf6
MC
12736 kfree(bp->rss_indir_tbl);
12737 bp->rss_indir_tbl = NULL;
fd3ab1c7 12738 bnxt_free_port_stats(bp);
c0c050c5 12739 free_netdev(dev);
c0c050c5
MC
12740}
12741
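/* Query PHY capabilities from firmware.  When fw_dflt is set (initial
 * probe), also read the current link state and seed the ethtool link
 * settings from the firmware defaults.
 */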
ba642ab7 12742static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
c0c050c5
MC
12743{
12744 int rc = 0;
12745 struct bnxt_link_info *link_info = &bp->link_info;
c0c050c5 12746
b0d28207 12747 bp->phy_flags = 0;
170ce013
MC
12748 rc = bnxt_hwrm_phy_qcaps(bp);
12749 if (rc) {
12750 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12751 rc);
12752 return rc;
12753 }
dade5e15
MC
12754 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12755 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12756 else
12757 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
43a5107d
MC
12758 if (!fw_dflt)
12759 return 0;
12760
c0c050c5
MC
12761 rc = bnxt_update_link(bp, false);
12762 if (rc) {
12763 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12764 rc);
12765 return rc;
12766 }
12767
93ed8117
MC
12768 /* Older firmware does not have supported_auto_speeds, so assume
12769 * that all supported speeds can be autonegotiated.
12770 */
12771 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12772 link_info->support_auto_speeds = link_info->support_speeds;
12773
8119e49b 12774 bnxt_init_ethtool_link_settings(bp);
ba642ab7 12775 return 0;
c0c050c5
MC
12776}
12777
12778static int bnxt_get_max_irq(struct pci_dev *pdev)
12779{
12780 u16 ctrl;
12781
12782 if (!pdev->msix_cap)
12783 return 1;
12784
12785 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12786 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12787}
12788
6e6c5a57
MC
12789static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12790 int *max_cp)
c0c050c5 12791{
6a4f2947 12792 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
e30fbc33 12793 int max_ring_grps = 0, max_irq;
c0c050c5 12794
6a4f2947
MC
12795 *max_tx = hw_resc->max_tx_rings;
12796 *max_rx = hw_resc->max_rx_rings;
e30fbc33
MC
12797 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12798 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12799 bnxt_get_ulp_msix_num(bp),
c027c6b4 12800 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
e30fbc33
MC
12801 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12802 *max_cp = min_t(int, *max_cp, max_irq);
6a4f2947 12803 max_ring_grps = hw_resc->max_hw_ring_grps;
76595193
PS
12804 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12805 *max_cp -= 1;
12806 *max_rx -= 2;
12807 }
c0c050c5
MC
12808 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12809 *max_rx >>= 1;
e30fbc33
MC
12810 if (bp->flags & BNXT_FLAG_CHIP_P5) {
12811 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12812 /* On P5 chips, max_cp output param should be available NQs */
12813 *max_cp = max_irq;
12814 }
b72d4a68 12815 *max_rx = min_t(int, *max_rx, max_ring_grps);
6e6c5a57
MC
12816}
12817
12818int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12819{
12820 int rx, tx, cp;
12821
12822 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
78f058a4
MC
12823 *max_rx = rx;
12824 *max_tx = tx;
6e6c5a57
MC
12825 if (!rx || !tx || !cp)
12826 return -ENOMEM;
12827
6e6c5a57
MC
12828 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12829}
12830
e4060d30
MC
12831static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12832 bool shared)
12833{
12834 int rc;
12835
12836 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
bdbd1eb5
MC
12837 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12838 /* Not enough rings, try disabling agg rings. */
12839 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12840 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
07f4fde5
MC
12841 if (rc) {
12842 /* set BNXT_FLAG_AGG_RINGS back for consistency */
12843 bp->flags |= BNXT_FLAG_AGG_RINGS;
bdbd1eb5 12844 return rc;
07f4fde5 12845 }
bdbd1eb5 12846 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
1054aee8
MC
12847 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12848 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bdbd1eb5
MC
12849 bnxt_set_ring_params(bp);
12850 }
e4060d30
MC
12851
12852 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12853 int max_cp, max_stat, max_irq;
12854
12855 /* Reserve minimum resources for RoCE */
12856 max_cp = bnxt_get_max_func_cp_rings(bp);
12857 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12858 max_irq = bnxt_get_max_func_irqs(bp);
12859 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12860 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12861 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12862 return 0;
12863
12864 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12865 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12866 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12867 max_cp = min_t(int, max_cp, max_irq);
12868 max_cp = min_t(int, max_cp, max_stat);
12869 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12870 if (rc)
12871 rc = 0;
12872 }
12873 return rc;
12874}
12875
58ea801a
MC
12876/* In the initial default shared ring setting, each shared ring must have an
12877 * RX/TX ring pair.
12878 */
12879static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12880{
12881 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12882 bp->rx_nr_rings = bp->cp_nr_rings;
12883 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12884 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12885}
12886
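/* Choose the default RX/TX ring counts: start from the generic RSS
 * default queue count (one ring under kdump), scale down on multi-port
 * cards so the total does not exceed the online CPUs, cap by what the
 * hardware supports, then reserve the rings with firmware and trim
 * again if the reservation comes back smaller.
 */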
702c221c 12887static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
6e6c5a57
MC
12888{
12889 int dflt_rings, max_rx_rings, max_tx_rings, rc;
6e6c5a57 12890
2773dfb2
MC
12891 if (!bnxt_can_reserve_rings(bp))
12892 return 0;
12893
6e6c5a57
MC
12894 if (sh)
12895 bp->flags |= BNXT_FLAG_SHARED_RINGS;
d629522e 12896 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
1d3ef13d
MC
12897 /* Reduce default rings on multi-port cards so that total default
12898 * rings do not exceed CPU count.
12899 */
12900 if (bp->port_count > 1) {
12901 int max_rings =
12902 max_t(int, num_online_cpus() / bp->port_count, 1);
12903
12904 dflt_rings = min_t(int, dflt_rings, max_rings);
12905 }
e4060d30 12906 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6e6c5a57
MC
12907 if (rc)
12908 return rc;
12909 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12910 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
58ea801a
MC
12911 if (sh)
12912 bnxt_trim_dflt_sh_rings(bp);
12913 else
12914 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12915 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
391be5c2 12916
674f50a5 12917 rc = __bnxt_reserve_rings(bp);
391be5c2
MC
12918 if (rc)
12919 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
58ea801a
MC
12920 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12921 if (sh)
12922 bnxt_trim_dflt_sh_rings(bp);
391be5c2 12923
674f50a5
MC
12924 /* Rings may have been trimmed, re-reserve the trimmed rings. */
12925 if (bnxt_need_reserve_rings(bp)) {
12926 rc = __bnxt_reserve_rings(bp);
12927 if (rc)
12928 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
12929 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12930 }
76595193
PS
12931 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12932 bp->rx_nr_rings++;
12933 bp->cp_nr_rings++;
12934 }
5d765a5e
VV
12935 if (rc) {
12936 bp->tx_nr_rings = 0;
12937 bp->rx_nr_rings = 0;
12938 }
6e6c5a57 12939 return rc;
c0c050c5
MC
12940}
12941
47558acd
MC
12942static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
12943{
12944 int rc;
12945
12946 if (bp->tx_nr_rings)
12947 return 0;
12948
6b95c3e9
MC
12949 bnxt_ulp_irq_stop(bp);
12950 bnxt_clear_int_mode(bp);
47558acd
MC
12951 rc = bnxt_set_dflt_rings(bp, true);
12952 if (rc) {
12953 netdev_err(bp->dev, "Not enough rings available.\n");
6b95c3e9 12954 goto init_dflt_ring_err;
47558acd
MC
12955 }
12956 rc = bnxt_init_int_mode(bp);
12957 if (rc)
6b95c3e9
MC
12958 goto init_dflt_ring_err;
12959
47558acd
MC
12960 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12961 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
12962 bp->flags |= BNXT_FLAG_RFS;
12963 bp->dev->features |= NETIF_F_NTUPLE;
12964 }
6b95c3e9
MC
12965init_dflt_ring_err:
12966 bnxt_ulp_irq_restart(bp, rc);
12967 return rc;
47558acd
MC
12968}
12969
80fcaf46 12970int bnxt_restore_pf_fw_resources(struct bnxt *bp)
7b08f661 12971{
80fcaf46
MC
12972 int rc;
12973
7b08f661
MC
12974 ASSERT_RTNL();
12975 bnxt_hwrm_func_qcaps(bp);
1a037782
VD
12976
12977 if (netif_running(bp->dev))
12978 __bnxt_close_nic(bp, true, false);
12979
ec86f14e 12980 bnxt_ulp_irq_stop(bp);
80fcaf46
MC
12981 bnxt_clear_int_mode(bp);
12982 rc = bnxt_init_int_mode(bp);
ec86f14e 12983 bnxt_ulp_irq_restart(bp, rc);
1a037782
VD
12984
12985 if (netif_running(bp->dev)) {
12986 if (rc)
12987 dev_close(bp->dev);
12988 else
12989 rc = bnxt_open_nic(bp, true, false);
12990 }
12991
80fcaf46 12992 return rc;
7b08f661
MC
12993}
12994
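/* Set the initial MAC address.  The PF uses the address assigned by
 * firmware; a VF uses the PF-administered address if one is valid
 * (with relaxed approval for older PF drivers/firmware) or a random
 * address otherwise, and asks the PF to approve it.
 */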
a22a6ac2
MC
12995static int bnxt_init_mac_addr(struct bnxt *bp)
12996{
12997 int rc = 0;
12998
12999 if (BNXT_PF(bp)) {
13000 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
13001 } else {
13002#ifdef CONFIG_BNXT_SRIOV
13003 struct bnxt_vf_info *vf = &bp->vf;
28ea334b 13004 bool strict_approval = true;
a22a6ac2
MC
13005
13006 if (is_valid_ether_addr(vf->mac_addr)) {
91cdda40 13007 /* overwrite netdev dev_addr with admin VF MAC */
a22a6ac2 13008 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
28ea334b
MC
13009 /* Older PF driver or firmware may not approve this
13010 * correctly.
13011 */
13012 strict_approval = false;
a22a6ac2
MC
13013 } else {
13014 eth_hw_addr_random(bp->dev);
a22a6ac2 13015 }
28ea334b 13016 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
a22a6ac2
MC
13017#endif
13018 }
13019 return rc;
13020}
13021
a0d0fd70
VV
13022#define BNXT_VPD_LEN 512
13023static void bnxt_vpd_read_info(struct bnxt *bp)
13024{
13025 struct pci_dev *pdev = bp->pdev;
492adcf4 13026 int i, len, pos, ro_size, size;
a0d0fd70
VV
13027 ssize_t vpd_size;
13028 u8 *vpd_data;
13029
13030 vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
13031 if (!vpd_data)
13032 return;
13033
13034 vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
13035 if (vpd_size <= 0) {
13036 netdev_err(bp->dev, "Unable to read VPD\n");
13037 goto exit;
13038 }
13039
4cf0abbc 13040 i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
a0d0fd70
VV
13041 if (i < 0) {
13042 netdev_err(bp->dev, "VPD READ-Only not found\n");
13043 goto exit;
13044 }
13045
13046 ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
13047 i += PCI_VPD_LRDT_TAG_SIZE;
13048 if (i + ro_size > vpd_size)
13049 goto exit;
13050
13051 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13052 PCI_VPD_RO_KEYWORD_PARTNO);
13053 if (pos < 0)
13054 goto read_sn;
13055
13056 len = pci_vpd_info_field_size(&vpd_data[pos]);
13057 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13058 if (len + pos > vpd_size)
13059 goto read_sn;
13060
492adcf4
VV
13061 size = min(len, BNXT_VPD_FLD_LEN - 1);
13062 memcpy(bp->board_partno, &vpd_data[pos], size);
a0d0fd70
VV
13063
13064read_sn:
13065 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13066 PCI_VPD_RO_KEYWORD_SERIALNO);
13067 if (pos < 0)
13068 goto exit;
13069
13070 len = pci_vpd_info_field_size(&vpd_data[pos]);
13071 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13072 if (len + pos > vpd_size)
13073 goto exit;
13074
492adcf4
VV
13075 size = min(len, BNXT_VPD_FLD_LEN - 1);
13076 memcpy(bp->board_serialno, &vpd_data[pos], size);
a0d0fd70
VV
13077exit:
13078 kfree(vpd_data);
13079}
13080
03213a99
JP
13081static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13082{
13083 struct pci_dev *pdev = bp->pdev;
8d85b75b 13084 u64 qword;
03213a99 13085
8d85b75b
JK
13086 qword = pci_get_dsn(pdev);
13087 if (!qword) {
13088 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
03213a99
JP
13089 return -EOPNOTSUPP;
13090 }
13091
8d85b75b
JK
13092 put_unaligned_le64(qword, dsn);
13093
d061b241 13094 bp->flags |= BNXT_FLAG_DSN_VALID;
03213a99
JP
13095 return 0;
13096}
13097
8ae24738
MC
13098static int bnxt_map_db_bar(struct bnxt *bp)
13099{
13100 if (!bp->db_size)
13101 return -ENODEV;
13102 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13103 if (!bp->bar1)
13104 return -ENOMEM;
13105 return 0;
13106}
13107
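/* PCI probe entry point.  Roughly: quiesce crash-kernel DMA, allocate
 * the netdev, map BAR0/BAR4, set up the HWRM channel, run firmware init
 * phases 1 and 2, map the doorbell BAR, then initialize features, MAC
 * address, PHY and default rings before registering devlink and the
 * net device.
 */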
c0c050c5
MC
13108static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13109{
c0c050c5
MC
13110 struct net_device *dev;
13111 struct bnxt *bp;
6e6c5a57 13112 int rc, max_irqs;
c0c050c5 13113
4e00338a 13114 if (pci_is_bridge(pdev))
fa853dda
PS
13115 return -ENODEV;
13116
8743db4a
VV
13117 /* Clear any pending DMA transactions from crash kernel
13118 * while loading driver in capture kernel.
13119 */
13120 if (is_kdump_kernel()) {
13121 pci_clear_master(pdev);
13122 pcie_flr(pdev);
13123 }
13124
c0c050c5
MC
13125 max_irqs = bnxt_get_max_irq(pdev);
13126 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13127 if (!dev)
13128 return -ENOMEM;
13129
13130 bp = netdev_priv(dev);
8fb35cd3 13131 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
9c1fabdf 13132 bnxt_set_max_func_irqs(bp, max_irqs);
c0c050c5
MC
13133
13134 if (bnxt_vf_pciid(ent->driver_data))
13135 bp->flags |= BNXT_FLAG_VF;
13136
2bcfa6f6 13137 if (pdev->msix_cap)
c0c050c5 13138 bp->flags |= BNXT_FLAG_MSIX_CAP;
c0c050c5
MC
13139
13140 rc = bnxt_init_board(pdev, dev);
13141 if (rc < 0)
13142 goto init_err_free;
13143
13144 dev->netdev_ops = &bnxt_netdev_ops;
13145 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13146 dev->ethtool_ops = &bnxt_ethtool_ops;
c0c050c5
MC
13147 pci_set_drvdata(pdev, dev);
13148
3e8060fa
PS
13149 rc = bnxt_alloc_hwrm_resources(bp);
13150 if (rc)
17086399 13151 goto init_err_pci_clean;
3e8060fa
PS
13152
13153 mutex_init(&bp->hwrm_cmd_lock);
ba642ab7 13154 mutex_init(&bp->link_lock);
7c380918
MC
13155
13156 rc = bnxt_fw_init_one_p1(bp);
3e8060fa 13157 if (rc)
17086399 13158 goto init_err_pci_clean;
3e8060fa 13159
3e3c09b0
VV
13160 if (BNXT_PF(bp))
13161 bnxt_vpd_read_info(bp);
13162
9d6b648c 13163 if (BNXT_CHIP_P5(bp)) {
e38287b7 13164 bp->flags |= BNXT_FLAG_CHIP_P5;
9d6b648c
MC
13165 if (BNXT_CHIP_SR2(bp))
13166 bp->flags |= BNXT_FLAG_CHIP_SR2;
13167 }
e38287b7 13168
5fa65524
EP
13169 rc = bnxt_alloc_rss_indir_tbl(bp);
13170 if (rc)
13171 goto init_err_pci_clean;
13172
7c380918 13173 rc = bnxt_fw_init_one_p2(bp);
3c2217a6
MC
13174 if (rc)
13175 goto init_err_pci_clean;
13176
8ae24738
MC
13177 rc = bnxt_map_db_bar(bp);
13178 if (rc) {
13179 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13180 rc);
13181 goto init_err_pci_clean;
13182 }
13183
c0c050c5
MC
13184 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13185 NETIF_F_TSO | NETIF_F_TSO6 |
13186 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7e13318d 13187 NETIF_F_GSO_IPXIP4 |
152971ee
AD
13188 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13189 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
3e8060fa
PS
13190 NETIF_F_RXCSUM | NETIF_F_GRO;
13191
e38287b7 13192 if (BNXT_SUPPORTS_TPA(bp))
3e8060fa 13193 dev->hw_features |= NETIF_F_LRO;
c0c050c5 13194
c0c050c5
MC
13195 dev->hw_enc_features =
13196 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13197 NETIF_F_TSO | NETIF_F_TSO6 |
13198 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
152971ee 13199 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7e13318d 13200 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
442a35a5
JK
13201 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13202
152971ee
AD
13203 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13204 NETIF_F_GSO_GRE_CSUM;
c0c050c5 13205 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
1da63ddd
EP
13206 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13207 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13208 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13209 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
e38287b7 13210 if (BNXT_SUPPORTS_TPA(bp))
1054aee8 13211 dev->hw_features |= NETIF_F_GRO_HW;
c0c050c5 13212 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
1054aee8
MC
13213 if (dev->features & NETIF_F_GRO_HW)
13214 dev->features &= ~NETIF_F_LRO;
c0c050c5
MC
13215 dev->priv_flags |= IFF_UNICAST_FLT;
13216
13217#ifdef CONFIG_BNXT_SRIOV
13218 init_waitqueue_head(&bp->sriov_cfg_wait);
4ab0c6a8 13219 mutex_init(&bp->sriov_lock);
c0c050c5 13220#endif
e38287b7
MC
13221 if (BNXT_SUPPORTS_TPA(bp)) {
13222 bp->gro_func = bnxt_gro_func_5730x;
67912c36 13223 if (BNXT_CHIP_P4(bp))
e38287b7 13224 bp->gro_func = bnxt_gro_func_5731x;
67912c36
MC
13225 else if (BNXT_CHIP_P5(bp))
13226 bp->gro_func = bnxt_gro_func_5750x;
e38287b7
MC
13227 }
13228 if (!BNXT_CHIP_P4_PLUS(bp))
434c975a 13229 bp->flags |= BNXT_FLAG_DOUBLE_DB;
309369c9 13230
a22a6ac2
MC
13231 rc = bnxt_init_mac_addr(bp);
13232 if (rc) {
13233 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13234 rc = -EADDRNOTAVAIL;
13235 goto init_err_pci_clean;
13236 }
c0c050c5 13237
2e9217d1
VV
13238 if (BNXT_PF(bp)) {
13239 /* Read the adapter's DSN to use as the eswitch switch_id */
b014232f 13240 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
2e9217d1 13241 }
567b2abe 13242
7eb9bb3a
MC
13243 /* MTU range: 60 - FW defined max */
13244 dev->min_mtu = ETH_ZLEN;
13245 dev->max_mtu = bp->max_mtu;
13246
ba642ab7 13247 rc = bnxt_probe_phy(bp, true);
d5430d31
MC
13248 if (rc)
13249 goto init_err_pci_clean;
13250
c61fb99c 13251 bnxt_set_rx_skb_mode(bp, false);
c0c050c5
MC
13252 bnxt_set_tpa_flags(bp);
13253 bnxt_set_ring_params(bp);
702c221c 13254 rc = bnxt_set_dflt_rings(bp, true);
bdbd1eb5
MC
13255 if (rc) {
13256 netdev_err(bp->dev, "Not enough rings available.\n");
13257 rc = -ENOMEM;
17086399 13258 goto init_err_pci_clean;
bdbd1eb5 13259 }
c0c050c5 13260
ba642ab7 13261 bnxt_fw_init_one_p3(bp);
2bcfa6f6 13262
a196e96b 13263 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
c0c050c5
MC
13264 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13265
7809592d 13266 rc = bnxt_init_int_mode(bp);
c0c050c5 13267 if (rc)
17086399 13268 goto init_err_pci_clean;
c0c050c5 13269
832aed16
MC
13270 /* No TC has been set yet and rings may have been trimmed due to
13271 * limited MSIX, so we re-initialize the TX rings per TC.
13272 */
13273 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13274
c213eae8
MC
13275 if (BNXT_PF(bp)) {
13276 if (!bnxt_pf_wq) {
13277 bnxt_pf_wq =
13278 create_singlethread_workqueue("bnxt_pf_wq");
13279 if (!bnxt_pf_wq) {
13280 dev_err(&pdev->dev, "Unable to create workqueue.\n");
b5f796b6 13281 rc = -ENOMEM;
c213eae8
MC
13282 goto init_err_pci_clean;
13283 }
13284 }
18c7015c
JK
13285 rc = bnxt_init_tc(bp);
13286 if (rc)
13287 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13288 rc);
c213eae8 13289 }
2ae7408f 13290
190eda1a 13291 bnxt_inv_fw_health_reg(bp);
cda2cab0
VV
13292 bnxt_dl_register(bp);
13293
7809592d
MC
13294 rc = register_netdev(dev);
13295 if (rc)
cda2cab0 13296 goto init_err_cleanup;
7809592d 13297
cda2cab0
VV
13298 if (BNXT_PF(bp))
13299 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
7e334fc8 13300 bnxt_dl_fw_reporters_create(bp);
4ab0c6a8 13301
c0c050c5
MC
13302 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13303 board_info[ent->driver_data].name,
13304 (long)pci_resource_start(pdev, 0), dev->dev_addr);
af125b75 13305 pcie_print_link_status(pdev);
90c4f788 13306
df3875ec 13307 pci_save_state(pdev);
c0c050c5
MC
13308 return 0;
13309
cda2cab0
VV
13310init_err_cleanup:
13311 bnxt_dl_unregister(bp);
2ae7408f 13312 bnxt_shutdown_tc(bp);
7809592d
MC
13313 bnxt_clear_int_mode(bp);
13314
17086399 13315init_err_pci_clean:
bdb38602 13316 bnxt_hwrm_func_drv_unrgtr(bp);
f9099d61 13317 bnxt_free_hwrm_short_cmd_req(bp);
a2bf74f4 13318 bnxt_free_hwrm_resources(bp);
03400aaa 13319 bnxt_ethtool_free(bp);
a521c8a0 13320 bnxt_ptp_clear(bp);
ae5c42f0
MC
13321 kfree(bp->ptp_cfg);
13322 bp->ptp_cfg = NULL;
07f83d72
MC
13323 kfree(bp->fw_health);
13324 bp->fw_health = NULL;
17086399 13325 bnxt_cleanup_pci(bp);
62bfb932
MC
13326 bnxt_free_ctx_mem(bp);
13327 kfree(bp->ctx);
13328 bp->ctx = NULL;
1667cbf6
MC
13329 kfree(bp->rss_indir_tbl);
13330 bp->rss_indir_tbl = NULL;
c0c050c5
MC
13331
13332init_err_free:
13333 free_netdev(dev);
13334 return rc;
13335}
13336
d196ece7
MC
13337static void bnxt_shutdown(struct pci_dev *pdev)
13338{
13339 struct net_device *dev = pci_get_drvdata(pdev);
13340 struct bnxt *bp;
13341
13342 if (!dev)
13343 return;
13344
13345 rtnl_lock();
13346 bp = netdev_priv(dev);
13347 if (!bp)
13348 goto shutdown_exit;
13349
13350 if (netif_running(dev))
13351 dev_close(dev);
13352
a7f3f939 13353 bnxt_ulp_shutdown(bp);
5567ae4a
VV
13354 bnxt_clear_int_mode(bp);
13355 pci_disable_device(pdev);
a7f3f939 13356
d196ece7 13357 if (system_state == SYSTEM_POWER_OFF) {
d196ece7
MC
13358 pci_wake_from_d3(pdev, bp->wol);
13359 pci_set_power_state(pdev, PCI_D3hot);
13360 }
13361
13362shutdown_exit:
13363 rtnl_unlock();
13364}
13365
f65a2044
MC
13366#ifdef CONFIG_PM_SLEEP
13367static int bnxt_suspend(struct device *device)
13368{
f521eaa9 13369 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
13370 struct bnxt *bp = netdev_priv(dev);
13371 int rc = 0;
13372
13373 rtnl_lock();
6a68749d 13374 bnxt_ulp_stop(bp);
f65a2044
MC
13375 if (netif_running(dev)) {
13376 netif_device_detach(dev);
13377 rc = bnxt_close(dev);
13378 }
13379 bnxt_hwrm_func_drv_unrgtr(bp);
ef02af8c 13380 pci_disable_device(bp->pdev);
f9b69d7f
VV
13381 bnxt_free_ctx_mem(bp);
13382 kfree(bp->ctx);
13383 bp->ctx = NULL;
f65a2044
MC
13384 rtnl_unlock();
13385 return rc;
13386}
13387
13388static int bnxt_resume(struct device *device)
13389{
f521eaa9 13390 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
13391 struct bnxt *bp = netdev_priv(dev);
13392 int rc = 0;
13393
13394 rtnl_lock();
ef02af8c
MC
13395 rc = pci_enable_device(bp->pdev);
13396 if (rc) {
13397 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13398 rc);
13399 goto resume_exit;
13400 }
13401 pci_set_master(bp->pdev);
f92335d8 13402 if (bnxt_hwrm_ver_get(bp)) {
f65a2044
MC
13403 rc = -ENODEV;
13404 goto resume_exit;
13405 }
13406 rc = bnxt_hwrm_func_reset(bp);
13407 if (rc) {
13408 rc = -EBUSY;
13409 goto resume_exit;
13410 }
f92335d8 13411
2084ccf6
MC
13412 rc = bnxt_hwrm_func_qcaps(bp);
13413 if (rc)
f9b69d7f 13414 goto resume_exit;
f92335d8
VV
13415
13416 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13417 rc = -ENODEV;
13418 goto resume_exit;
13419 }
13420
f65a2044
MC
13421 bnxt_get_wol_settings(bp);
13422 if (netif_running(dev)) {
13423 rc = bnxt_open(dev);
13424 if (!rc)
13425 netif_device_attach(dev);
13426 }
13427
13428resume_exit:
6a68749d 13429 bnxt_ulp_start(bp, rc);
59ae2101
MC
13430 if (!rc)
13431 bnxt_reenable_sriov(bp);
f65a2044
MC
13432 rtnl_unlock();
13433 return rc;
13434}
13435
13436static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13437#define BNXT_PM_OPS (&bnxt_pm_ops)
13438
13439#else
13440
13441#define BNXT_PM_OPS NULL
13442
13443#endif /* CONFIG_PM_SLEEP */
13444
6316ea6d
SB
13445/**
13446 * bnxt_io_error_detected - called when PCI error is detected
13447 * @pdev: Pointer to PCI device
13448 * @state: The current pci connection state
13449 *
13450 * This function is called after a PCI bus error affecting
13451 * this device has been detected.
13452 */
13453static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13454 pci_channel_state_t state)
13455{
13456 struct net_device *netdev = pci_get_drvdata(pdev);
a588e458 13457 struct bnxt *bp = netdev_priv(netdev);
6316ea6d
SB
13458
13459 netdev_info(netdev, "PCI I/O error detected\n");
13460
13461 rtnl_lock();
13462 netif_device_detach(netdev);
13463
a588e458
MC
13464 bnxt_ulp_stop(bp);
13465
6316ea6d
SB
13466 if (state == pci_channel_io_perm_failure) {
13467 rtnl_unlock();
13468 return PCI_ERS_RESULT_DISCONNECT;
13469 }
13470
f75d9a0a
VV
13471 if (state == pci_channel_io_frozen)
13472 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13473
6316ea6d
SB
13474 if (netif_running(netdev))
13475 bnxt_close(netdev);
13476
c81cfb62
KA
13477 if (pci_is_enabled(pdev))
13478 pci_disable_device(pdev);
6e2f8388
MC
13479 bnxt_free_ctx_mem(bp);
13480 kfree(bp->ctx);
13481 bp->ctx = NULL;
6316ea6d
SB
13482 rtnl_unlock();
13483
13484 /* Request a slot reset. */
13485 return PCI_ERS_RESULT_NEED_RESET;
13486}
13487
13488/**
13489 * bnxt_io_slot_reset - called after the pci bus has been reset.
13490 * @pdev: Pointer to PCI device
13491 *
13492 * Restart the card from scratch, as if from a cold-boot.
13493 * At this point, the card has experienced a hard reset,
13494 * followed by fixups by BIOS, and has its config space
13495 * set up identically to what it was at cold boot.
13496 */
13497static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13498{
fb1e6e56 13499 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
6316ea6d
SB
13500 struct net_device *netdev = pci_get_drvdata(pdev);
13501 struct bnxt *bp = netdev_priv(netdev);
f75d9a0a 13502 int err = 0, off;
6316ea6d
SB
13503
13504 netdev_info(bp->dev, "PCI Slot Reset\n");
13505
13506 rtnl_lock();
13507
13508 if (pci_enable_device(pdev)) {
13509 dev_err(&pdev->dev,
13510 "Cannot re-enable PCI device after reset.\n");
13511 } else {
13512 pci_set_master(pdev);
f75d9a0a
VV
13513 /* Upon fatal error, the device's internal logic that latches
13514 * the BAR values is reset and is restored only by rewriting
13515 * the BARs.
13516 *
13517 * pci_restore_state() does not rewrite a BAR if its value matches
13518 * the previously saved value, so in the fatal error case the
13519 * driver writes the BARs to 0 first to force the restore.
13520 */
13521 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13522 &bp->state)) {
13523 for (off = PCI_BASE_ADDRESS_0;
13524 off <= PCI_BASE_ADDRESS_5; off += 4)
13525 pci_write_config_dword(bp->pdev, off, 0);
13526 }
df3875ec
VV
13527 pci_restore_state(pdev);
13528 pci_save_state(pdev);
6316ea6d 13529
aa8ed021 13530 err = bnxt_hwrm_func_reset(bp);
fb1e6e56 13531 if (!err)
6e2f8388 13532 result = PCI_ERS_RESULT_RECOVERED;
bae361c5 13533 }
6316ea6d
SB
13534
13535 rtnl_unlock();
13536
bae361c5 13537 return result;
6316ea6d
SB
13538}
13539
13540/**
13541 * bnxt_io_resume - called when traffic can start flowing again.
13542 * @pdev: Pointer to PCI device
13543 *
13544 * This callback is called when the error recovery driver tells
13545 * us that its OK to resume normal operation.
13546 */
13547static void bnxt_io_resume(struct pci_dev *pdev)
13548{
13549 struct net_device *netdev = pci_get_drvdata(pdev);
fb1e6e56
VV
13550 struct bnxt *bp = netdev_priv(netdev);
13551 int err;
6316ea6d 13552
fb1e6e56 13553 netdev_info(bp->dev, "PCI Slot Resume\n");
6316ea6d
SB
13554 rtnl_lock();
13555
fb1e6e56
VV
13556 err = bnxt_hwrm_func_qcaps(bp);
13557 if (!err && netif_running(netdev))
13558 err = bnxt_open(netdev);
13559
13560 bnxt_ulp_start(bp, err);
13561 if (!err) {
13562 bnxt_reenable_sriov(bp);
13563 netif_device_attach(netdev);
13564 }
6316ea6d
SB
13565
13566 rtnl_unlock();
13567}
13568
13569static const struct pci_error_handlers bnxt_err_handler = {
13570 .error_detected = bnxt_io_error_detected,
13571 .slot_reset = bnxt_io_slot_reset,
13572 .resume = bnxt_io_resume
13573};
13574
c0c050c5
MC
13575static struct pci_driver bnxt_pci_driver = {
13576 .name = DRV_MODULE_NAME,
13577 .id_table = bnxt_pci_tbl,
13578 .probe = bnxt_init_one,
13579 .remove = bnxt_remove_one,
d196ece7 13580 .shutdown = bnxt_shutdown,
f65a2044 13581 .driver.pm = BNXT_PM_OPS,
6316ea6d 13582 .err_handler = &bnxt_err_handler,
c0c050c5
MC
13583#if defined(CONFIG_BNXT_SRIOV)
13584 .sriov_configure = bnxt_sriov_configure,
13585#endif
13586};
13587
c213eae8
MC
13588static int __init bnxt_init(void)
13589{
cabfb09d 13590 bnxt_debug_init();
c213eae8
MC
13591 return pci_register_driver(&bnxt_pci_driver);
13592}
13593
13594static void __exit bnxt_exit(void)
13595{
13596 pci_unregister_driver(&bnxt_pci_driver);
13597 if (bnxt_pf_wq)
13598 destroy_workqueue(bnxt_pf_wq);
cabfb09d 13599 bnxt_debug_exit();
c213eae8
MC
13600}
13601
13602module_init(bnxt_init);
13603module_exit(bnxt_exit);