/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM57504,
	BCM57502,
	BCM57508_NPAR,
	BCM57504_NPAR,
	BCM57502_NPAR,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
	NETXTREME_C_VF_HV,
	NETXTREME_E_VF_HV,
	NETXTREME_E_P5_VF,
	NETXTREME_E_P5_VF_HV,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

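/* PCI device ID table; .driver_data holds the board_idx used to look up
 * the board name in board_info[] at probe time.
 */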
static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

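/* Async firmware event IDs that the driver registers for and handles */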
static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
	ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx),	\
		    (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
		    (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
			    RING_CMP(idx), db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

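/* TX length hint lookup table, indexed by packet length in 512-byte units */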
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

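/* Return the CFA action for the TX BD: the port id carried in a
 * HW_PORT_MUX metadata dst (set on VF representor TX), or 0 if none.
 */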
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
					  struct bnxt_tx_ring_info *txr,
					  struct netdev_queue *txq)
{
	netif_tx_stop_queue(txq);

	/* netif_tx_stop_queue() must be done before checking
	 * tx index in bnxt_tx_avail() below, because in
	 * bnxt_tx_int(), we update tx index before checking for
	 * netif_tx_queue_stopped().
	 */
	smp_mb();
	if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
		netif_tx_wake_queue(txq);
		return false;
	}

	return true;
}

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;
	__le32 lflags = 0;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		atomic_long_inc(&dev->tx_dropped);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bp, tx_err, dev,
				   "bnxt: ring busy w/ flush pending!\n");
		if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
			return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

		if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
		    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
			if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
					    &ptp->tx_hdr_off)) {
				if (vlan_tag_flags)
					ptp->tx_hdr_off += VLAN_HLEN;
				lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
				skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			} else {
				atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}
	}

	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
	    !lflags) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad))
			/* SKB already freed. */
			goto tx_kick_pending;
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		goto tx_free;

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	skb_tx_timestamp(skb);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_txr_db_kick(bp, txr, prod);
	else
		txr->kick_pending = 1;

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_txr_db_kick(bp, txr, prod);

		bnxt_txr_netif_try_stop_queue(bp, txr, txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	if (BNXT_TX_PTP_IS_SET(lflags))
		atomic_inc(&bp->ptp_cfg->tx_avail);

	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	}

tx_free:
	dev_kfree_skb_any(skb);
tx_kick_pending:
	if (txr->kick_pending)
		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
	atomic_long_inc(&dev->tx_dropped);
	return NETDEV_TX_OK;
}

static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		bool compl_deferred = false;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				DMA_TO_DEVICE);
		}
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			if (bp->flags & BNXT_FLAG_CHIP_P5) {
				if (!bnxt_get_tx_ts_p5(bp, skb))
					compl_deferred = true;
				else
					atomic_inc(&bp->ptp_cfg->tx_avail);
			}
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		if (!compl_deferred)
			dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
	    READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
		netif_tx_wake_queue(txq);
}

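/* Allocate an RX buffer page from the ring's page_pool and DMA map it */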
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

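/* Find the next free slot in the RX aggregation ring bitmap, wrapping back
 * to the beginning if needed.
 */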
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

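/* Return agg_bufs aggregation buffers (starting at "start") to the RX
 * aggregation ring so they can be reused after an error or aborted TPA.
 */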
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);
	page_pool_release_page(rxr->page_pool, page);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 idx,
				     u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     DMA_FROM_DEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

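/* Copy-break path: copy a small received packet into a new skb so the
 * original RX buffer can stay in the ring.
 */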
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

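/* Skip over the aggregation completions of an RX packet that is being
 * discarded (e.g. while the ring is in reset); returns -EBUSY if they
 * are not all available yet.
 */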
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

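/* On P5 chips, map the hardware TPA agg_id to a free software index and
 * remember the mapping for the TPA end completion.
 */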
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

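/* Handle a TPA start completion: save the current RX buffer in
 * rx_tpa[agg_id] for the duration of the aggregation and give the RX ring
 * a replacement buffer.
 */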
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}

c0c050c5 1585static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
e44758b7 1586 struct bnxt_cp_ring_info *cpr,
c0c050c5
MC
1587 u32 *raw_cons,
1588 struct rx_tpa_end_cmp *tpa_end,
1589 struct rx_tpa_end_cmp_ext *tpa_end1,
4e5dbbda 1590 u8 *event)
c0c050c5 1591{
e44758b7 1592 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1593 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6bb19474 1594 u8 *data_ptr, agg_bufs;
c0c050c5
MC
1595 unsigned int len;
1596 struct bnxt_tpa_info *tpa_info;
1597 dma_addr_t mapping;
1598 struct sk_buff *skb;
bfcd8d79 1599 u16 idx = 0, agg_id;
6bb19474 1600 void *data;
bfcd8d79 1601 bool gro;
c0c050c5 1602
fa7e2812 1603 if (unlikely(bnapi->in_reset)) {
e44758b7 1604 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
fa7e2812
MC
1605
1606 if (rc < 0)
1607 return ERR_PTR(-EBUSY);
1608 return NULL;
1609 }
1610
bfcd8d79
MC
1611 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1612 agg_id = TPA_END_AGG_ID_P5(tpa_end);
ec4d8e7c 1613 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
bfcd8d79
MC
1614 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1615 tpa_info = &rxr->rx_tpa[agg_id];
1616 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1617 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1618 agg_bufs, tpa_info->agg_count);
1619 agg_bufs = tpa_info->agg_count;
1620 }
1621 tpa_info->agg_count = 0;
1622 *event |= BNXT_AGG_EVENT;
ec4d8e7c 1623 bnxt_free_agg_idx(rxr, agg_id);
bfcd8d79
MC
1624 idx = agg_id;
1625 gro = !!(bp->flags & BNXT_FLAG_GRO);
1626 } else {
1627 agg_id = TPA_END_AGG_ID(tpa_end);
1628 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1629 tpa_info = &rxr->rx_tpa[agg_id];
1630 idx = RING_CMP(*raw_cons);
1631 if (agg_bufs) {
1632 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1633 return ERR_PTR(-EBUSY);
1634
1635 *event |= BNXT_AGG_EVENT;
1636 idx = NEXT_CMP(idx);
1637 }
1638 gro = !!TPA_END_GRO(tpa_end);
1639 }
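	/* On P5 chips the aggregation completions arrived earlier as separate
	 * TPA_AGG entries and were stashed in tpa_info->agg_arr by
	 * bnxt_tpa_agg(); on older chips they follow this completion in the
	 * ring, so all of them must be present before the packet can be
	 * completed.
	 */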
c0c050c5 1640 data = tpa_info->data;
6bb19474
MC
1641 data_ptr = tpa_info->data_ptr;
1642 prefetch(data_ptr);
c0c050c5
MC
1643 len = tpa_info->len;
1644 mapping = tpa_info->mapping;
1645
69c149e2 1646 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
4a228a3a 1647 bnxt_abort_tpa(cpr, idx, agg_bufs);
69c149e2
MC
1648 if (agg_bufs > MAX_SKB_FRAGS)
1649 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1650 agg_bufs, (int)MAX_SKB_FRAGS);
c0c050c5
MC
1651 return NULL;
1652 }
1653
1654 if (len <= bp->rx_copy_thresh) {
6bb19474 1655 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
c0c050c5 1656 if (!skb) {
4a228a3a 1657 bnxt_abort_tpa(cpr, idx, agg_bufs);
907fd4a2 1658 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1659 return NULL;
1660 }
1661 } else {
1662 u8 *new_data;
1663 dma_addr_t new_mapping;
1664
1665 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1666 if (!new_data) {
4a228a3a 1667 bnxt_abort_tpa(cpr, idx, agg_bufs);
907fd4a2 1668 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1669 return NULL;
1670 }
1671
1672 tpa_info->data = new_data;
b3dba77c 1673 tpa_info->data_ptr = new_data + bp->rx_offset;
c0c050c5
MC
1674 tpa_info->mapping = new_mapping;
1675
1676 skb = build_skb(data, 0);
c519fe9a
SN
1677 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1678 bp->rx_buf_use_size, bp->rx_dir,
1679 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1680
1681 if (!skb) {
1682 kfree(data);
4a228a3a 1683 bnxt_abort_tpa(cpr, idx, agg_bufs);
907fd4a2 1684 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1685 return NULL;
1686 }
b3dba77c 1687 skb_reserve(skb, bp->rx_offset);
c0c050c5
MC
1688 skb_put(skb, len);
1689 }
1690
1691 if (agg_bufs) {
4a228a3a 1692 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
c0c050c5
MC
1693 if (!skb) {
1694 /* Page reuse already handled by bnxt_rx_pages(). */
907fd4a2 1695 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1696 return NULL;
1697 }
1698 }
ee5c7fb3
SP
1699
1700 skb->protocol =
1701 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
c0c050c5
MC
1702
1703 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1704 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1705
8852ddb4 1706 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
a196e96b 1707 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
96bdd4b9
MC
1708 __be16 vlan_proto = htons(tpa_info->metadata >>
1709 RX_CMP_FLAGS2_METADATA_TPID_SFT);
ed7bc602 1710 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
c0c050c5 1711
96bdd4b9
MC
1712 if (eth_type_vlan(vlan_proto)) {
1713 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1714 } else {
1715 dev_kfree_skb(skb);
1716 return NULL;
1717 }
c0c050c5
MC
1718 }
1719
1720 skb_checksum_none_assert(skb);
1721 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1722 skb->ip_summed = CHECKSUM_UNNECESSARY;
1723 skb->csum_level =
1724 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1725 }
1726
bfcd8d79 1727 if (gro)
309369c9 1728 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
c0c050c5
MC
1729
1730 return skb;
1731}
1732
8fe88ce7
MC
1733static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1734 struct rx_agg_cmp *rx_agg)
1735{
1736 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1737 struct bnxt_tpa_info *tpa_info;
1738
ec4d8e7c 1739 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
8fe88ce7
MC
1740 tpa_info = &rxr->rx_tpa[agg_id];
1741 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1742 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1743}
1744
ee5c7fb3
SP
1745static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1746 struct sk_buff *skb)
1747{
1748 if (skb->dev != bp->dev) {
1749 /* this packet belongs to a vf-rep */
1750 bnxt_vf_rep_rx(bp, skb);
1751 return;
1752 }
1753 skb_record_rx_queue(skb, bnapi->index);
1754 napi_gro_receive(&bnapi->napi, skb);
1755}
1756
c0c050c5
MC
1757/* returns the following:
1758 * 1 - 1 packet successfully received
1759 * 0 - successful TPA_START, packet not completed yet
1760 * -EBUSY - completion ring does not have all the agg buffers yet
1761 * -ENOMEM - packet aborted due to out of memory
1762 * -EIO - packet aborted due to hw error indicated in BD
1763 */
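/* The poll loop in __bnxt_poll_work() stops on -EBUSY because the
 * completion has only partially arrived, and counts -ENOMEM against the
 * NAPI budget so that persistent allocation failures cannot make the
 * loop spin forever.
 */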
e44758b7
MC
1764static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1765 u32 *raw_cons, u8 *event)
c0c050c5 1766{
e44758b7 1767 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1768 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1769 struct net_device *dev = bp->dev;
1770 struct rx_cmp *rxcmp;
1771 struct rx_cmp_ext *rxcmp1;
1772 u32 tmp_raw_cons = *raw_cons;
ee5c7fb3 1773 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
c0c050c5
MC
1774 struct bnxt_sw_rx_bd *rx_buf;
1775 unsigned int len;
6bb19474 1776 u8 *data_ptr, agg_bufs, cmp_type;
c0c050c5
MC
1777 dma_addr_t dma_addr;
1778 struct sk_buff *skb;
7f5515d1 1779 u32 flags, misc;
6bb19474 1780 void *data;
c0c050c5
MC
1781 int rc = 0;
1782
1783 rxcmp = (struct rx_cmp *)
1784 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1785
8fe88ce7
MC
1786 cmp_type = RX_CMP_TYPE(rxcmp);
1787
1788 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1789 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1790 goto next_rx_no_prod_no_len;
1791 }
1792
c0c050c5
MC
1793 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1794 cp_cons = RING_CMP(tmp_raw_cons);
1795 rxcmp1 = (struct rx_cmp_ext *)
1796 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1797
1798 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1799 return -EBUSY;
1800
828affc2
MC
1801 /* The valid test of the entry must be done first before
1802 * reading any further.
1803 */
1804 dma_rmb();
c0c050c5
MC
1805 prod = rxr->rx_prod;
1806
1807 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1808 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1809 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1810
4e5dbbda 1811 *event |= BNXT_RX_EVENT;
e7e70fa6 1812 goto next_rx_no_prod_no_len;
c0c050c5
MC
1813
1814 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
e44758b7 1815 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
c0c050c5 1816 (struct rx_tpa_end_cmp *)rxcmp,
4e5dbbda 1817 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
c0c050c5 1818
1fac4b2f 1819 if (IS_ERR(skb))
c0c050c5
MC
1820 return -EBUSY;
1821
1822 rc = -ENOMEM;
1823 if (likely(skb)) {
ee5c7fb3 1824 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1825 rc = 1;
1826 }
4e5dbbda 1827 *event |= BNXT_RX_EVENT;
e7e70fa6 1828 goto next_rx_no_prod_no_len;
c0c050c5
MC
1829 }
1830
1831 cons = rxcmp->rx_cmp_opaque;
fa7e2812 1832 if (unlikely(cons != rxr->rx_next_cons)) {
bbd6f0a9 1833 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
fa7e2812 1834
1b5c8b63
MC
1835 /* 0xffff is forced error, don't print it */
1836 if (rxr->rx_next_cons != 0xffff)
1837 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1838 cons, rxr->rx_next_cons);
fa7e2812 1839 bnxt_sched_reset(bp, rxr);
bbd6f0a9
MC
1840 if (rc1)
1841 return rc1;
1842 goto next_rx_no_prod_no_len;
fa7e2812 1843 }
a1b0e4e6
MC
1844 rx_buf = &rxr->rx_buf_ring[cons];
1845 data = rx_buf->data;
1846 data_ptr = rx_buf->data_ptr;
6bb19474 1847 prefetch(data_ptr);
c0c050c5 1848
c61fb99c
MC
1849 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1850 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
c0c050c5
MC
1851
1852 if (agg_bufs) {
1853 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1854 return -EBUSY;
1855
1856 cp_cons = NEXT_CMP(cp_cons);
4e5dbbda 1857 *event |= BNXT_AGG_EVENT;
c0c050c5 1858 }
4e5dbbda 1859 *event |= BNXT_RX_EVENT;
c0c050c5
MC
1860
1861 rx_buf->data = NULL;
1862 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
8e44e96c
MC
1863 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1864
c0c050c5
MC
1865 bnxt_reuse_rx_data(rxr, cons, data);
1866 if (agg_bufs)
4a228a3a
MC
1867 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1868 false);
c0c050c5
MC
1869
1870 rc = -EIO;
8e44e96c 1871 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
9d8b5f05 1872 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
8d4bd96b
MC
1873 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1874 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
8fbf58e1
MC
1875 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1876 rx_err);
19b3751f
MC
1877 bnxt_sched_reset(bp, rxr);
1878 }
8e44e96c 1879 }
0b397b17 1880 goto next_rx_no_len;
c0c050c5
MC
1881 }
1882
7f5515d1
PC
1883 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1884 len = flags >> RX_CMP_LEN_SHIFT;
11cd119d 1885 dma_addr = rx_buf->mapping;
c0c050c5 1886
c6d30e83
MC
1887 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1888 rc = 1;
1889 goto next_rx;
1890 }
1891
c0c050c5 1892 if (len <= bp->rx_copy_thresh) {
6bb19474 1893 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
c0c050c5
MC
1894 bnxt_reuse_rx_data(rxr, cons, data);
1895 if (!skb) {
296d5b54 1896 if (agg_bufs)
4a228a3a
MC
1897 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1898 agg_bufs, false);
907fd4a2 1899 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1900 rc = -ENOMEM;
1901 goto next_rx;
1902 }
1903 } else {
c61fb99c
MC
1904 u32 payload;
1905
c6d30e83
MC
1906 if (rx_buf->data_ptr == data_ptr)
1907 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1908 else
1909 payload = 0;
6bb19474 1910 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
c61fb99c 1911 payload | len);
c0c050c5 1912 if (!skb) {
907fd4a2 1913 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1914 rc = -ENOMEM;
1915 goto next_rx;
1916 }
1917 }
1918
1919 if (agg_bufs) {
4a228a3a 1920 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
c0c050c5 1921 if (!skb) {
907fd4a2 1922 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1923 rc = -ENOMEM;
1924 goto next_rx;
1925 }
1926 }
1927
1928 if (RX_CMP_HASH_VALID(rxcmp)) {
1929 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1930 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1931
1932 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1933 if (hash_type != 1 && hash_type != 3)
1934 type = PKT_HASH_TYPE_L3;
1935 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1936 }
1937
ee5c7fb3
SP
1938 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1939 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
c0c050c5 1940
8852ddb4
MC
1941 if ((rxcmp1->rx_cmp_flags2 &
1942 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
a196e96b 1943 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
c0c050c5 1944 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
ed7bc602 1945 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
96bdd4b9
MC
1946 __be16 vlan_proto = htons(meta_data >>
1947 RX_CMP_FLAGS2_METADATA_TPID_SFT);
c0c050c5 1948
96bdd4b9
MC
1949 if (eth_type_vlan(vlan_proto)) {
1950 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1951 } else {
1952 dev_kfree_skb(skb);
1953 goto next_rx;
1954 }
c0c050c5
MC
1955 }
1956
1957 skb_checksum_none_assert(skb);
1958 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1959 if (dev->features & NETIF_F_RXCSUM) {
1960 skb->ip_summed = CHECKSUM_UNNECESSARY;
1961 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1962 }
1963 } else {
665e350d
SB
1964 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1965 if (dev->features & NETIF_F_RXCSUM)
9d8b5f05 1966 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
665e350d 1967 }
c0c050c5
MC
1968 }
1969
7f5515d1
PC
1970 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1971 RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1972 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1973 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1974 u64 ns, ts;
1975
1976 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1977 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1978
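			/* Convert the raw timestamp from the completion to
			 * nanoseconds with the PTP timecounter (under
			 * ptp_lock) and report it as the skb hardware
			 * timestamp.
			 */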
1979 spin_lock_bh(&ptp->ptp_lock);
1980 ns = timecounter_cyc2time(&ptp->tc, ts);
1981 spin_unlock_bh(&ptp->ptp_lock);
1982 memset(skb_hwtstamps(skb), 0,
1983 sizeof(*skb_hwtstamps(skb)));
1984 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1985 }
1986 }
1987 }
ee5c7fb3 1988 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1989 rc = 1;
1990
1991next_rx:
6a8788f2
AG
1992 cpr->rx_packets += 1;
1993 cpr->rx_bytes += len;
e7e70fa6 1994
0b397b17
MC
1995next_rx_no_len:
1996 rxr->rx_prod = NEXT_RX(prod);
1997 rxr->rx_next_cons = NEXT_RX(cons);
1998
e7e70fa6 1999next_rx_no_prod_no_len:
c0c050c5
MC
2000 *raw_cons = tmp_raw_cons;
2001
2002 return rc;
2003}
2004
2270bc5d
MC
2005/* In netpoll mode, if we are using a combined completion ring, we need to
2006 * discard the rx packets and recycle the buffers.
2007 */
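/* The discard works by marking the completion with a forced error (a CRC
 * error for an L2 completion, the TPA error bits for a TPA end
 * completion) before calling bnxt_rx_pkt(), which then recycles the
 * buffers instead of building an skb.
 */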
e44758b7
MC
2008static int bnxt_force_rx_discard(struct bnxt *bp,
2009 struct bnxt_cp_ring_info *cpr,
2270bc5d
MC
2010 u32 *raw_cons, u8 *event)
2011{
2270bc5d
MC
2012 u32 tmp_raw_cons = *raw_cons;
2013 struct rx_cmp_ext *rxcmp1;
2014 struct rx_cmp *rxcmp;
2015 u16 cp_cons;
2016 u8 cmp_type;
40bedf7c 2017 int rc;
2270bc5d
MC
2018
2019 cp_cons = RING_CMP(tmp_raw_cons);
2020 rxcmp = (struct rx_cmp *)
2021 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2022
2023 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2024 cp_cons = RING_CMP(tmp_raw_cons);
2025 rxcmp1 = (struct rx_cmp_ext *)
2026 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2027
2028 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2029 return -EBUSY;
2030
828affc2
MC
2031 /* The valid test of the entry must be done first before
2032 * reading any further.
2033 */
2034 dma_rmb();
2270bc5d
MC
2035 cmp_type = RX_CMP_TYPE(rxcmp);
2036 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2037 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2038 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2039 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2040 struct rx_tpa_end_cmp_ext *tpa_end1;
2041
2042 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2043 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2044 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2045 }
40bedf7c
JK
2046 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2047 if (rc && rc != -EBUSY)
2048 cpr->sw_stats.rx.rx_netpoll_discards += 1;
2049 return rc;
2270bc5d
MC
2050}
2051
7e914027
MC
2052u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2053{
2054 struct bnxt_fw_health *fw_health = bp->fw_health;
2055 u32 reg = fw_health->regs[reg_idx];
2056 u32 reg_type, reg_off, val = 0;
2057
2058 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2059 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2060 switch (reg_type) {
2061 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2062 pci_read_config_dword(bp->pdev, reg_off, &val);
2063 break;
2064 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2065 reg_off = fw_health->mapped_regs[reg_idx];
df561f66 2066 fallthrough;
7e914027
MC
2067 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2068 val = readl(bp->bar0 + reg_off);
2069 break;
2070 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2071 val = readl(bp->bar1 + reg_off);
2072 break;
2073 }
2074 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2075 val &= fw_health->fw_reset_inprog_reg_mask;
2076 return val;
2077}
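/* Each fw_health->regs[] entry encodes both the register space (PCI
 * config, GRC, BAR0 or BAR1) and the offset within it.  GRC registers are
 * first remapped through fw_health->mapped_regs[] and then read through
 * BAR0, hence the fallthrough above.
 */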
2078
8d4bd96b
MC
2079static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2080{
2081 int i;
2082
2083 for (i = 0; i < bp->rx_nr_rings; i++) {
2084 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2085 struct bnxt_ring_grp_info *grp_info;
2086
2087 grp_info = &bp->grp_info[grp_idx];
2088 if (grp_info->agg_fw_ring_id == ring_id)
2089 return grp_idx;
2090 }
2091 return INVALID_HW_RING_ID;
2092}
2093
abf90ac2
PC
2094static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2095{
2096 switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
2097 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2098 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2099 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2100 break;
2101 default:
2102 netdev_err(bp->dev, "FW reported unknown error type\n");
2103 break;
2104 }
2105}
2106
4bb13abf 2107#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
2108 ((data) & \
2109 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 2110
8d4bd96b
MC
2111#define BNXT_EVENT_RING_TYPE(data2) \
2112 ((data2) & \
2113 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2114
2115#define BNXT_EVENT_RING_TYPE_RX(data2) \
2116 (BNXT_EVENT_RING_TYPE(data2) == \
2117 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2118
c0c050c5
MC
2119static int bnxt_async_event_process(struct bnxt *bp,
2120 struct hwrm_async_event_cmpl *cmpl)
2121{
2122 u16 event_id = le16_to_cpu(cmpl->event_id);
03ab8ca1
MC
2123 u32 data1 = le32_to_cpu(cmpl->event_data1);
2124 u32 data2 = le32_to_cpu(cmpl->event_data2);
c0c050c5
MC
2125
2126 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2127 switch (event_id) {
87c374de 2128 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
8cbde117
MC
2129 struct bnxt_link_info *link_info = &bp->link_info;
2130
2131 if (BNXT_VF(bp))
2132 goto async_event_process_exit;
a8168b6c
MC
2133
2134 /* print unsupported speed warning in forced speed mode only */
2135 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2136 (data1 & 0x20000)) {
8cbde117
MC
2137 u16 fw_speed = link_info->force_link_speed;
2138 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2139
a8168b6c
MC
2140 if (speed != SPEED_UNKNOWN)
2141 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2142 speed);
8cbde117 2143 }
286ef9d6 2144 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
8cbde117 2145 }
df561f66 2146 fallthrough;
b1613e78
MC
2147 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2148 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2149 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
df561f66 2150 fallthrough;
87c374de 2151 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
c0c050c5 2152 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
19241368 2153 break;
87c374de 2154 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
19241368 2155 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
c0c050c5 2156 break;
87c374de 2157 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
4bb13abf
MC
2158 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2159
2160 if (BNXT_VF(bp))
2161 break;
2162
2163 if (bp->pf.port_id != port_id)
2164 break;
2165
4bb13abf
MC
2166 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2167 break;
2168 }
87c374de 2169 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
fc0f1929
MC
2170 if (BNXT_PF(bp))
2171 goto async_event_process_exit;
2172 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2173 break;
5863b10a
MC
2174 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2175 char *fatal_str = "non-fatal";
2176
8280b38e
VV
2177 if (!bp->fw_health)
2178 goto async_event_process_exit;
2179
2151fe08
MC
2180 bp->fw_reset_timestamp = jiffies;
2181 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2182 if (!bp->fw_reset_min_dsecs)
2183 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2184 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2185 if (!bp->fw_reset_max_dsecs)
2186 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
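		/* timestamp_lo/hi in the completion carry the minimum and
		 * maximum reset wait times in deciseconds; zero means use the
		 * driver defaults.  They are logged below in milliseconds,
		 * hence the "* 100".
		 */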
acfb50e4 2187 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
5863b10a 2188 fatal_str = "fatal";
acfb50e4 2189 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
5863b10a 2190 }
871127e6
MC
2191 netif_warn(bp, hw, bp->dev,
2192 "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2193 fatal_str, data1, data2,
2194 bp->fw_reset_min_dsecs * 100,
2195 bp->fw_reset_max_dsecs * 100);
2151fe08
MC
2196 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2197 break;
5863b10a 2198 }
7e914027
MC
2199 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2200 struct bnxt_fw_health *fw_health = bp->fw_health;
7e914027
MC
2201
2202 if (!fw_health)
2203 goto async_event_process_exit;
2204
1b2b9183
MC
2205 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2206 fw_health->enabled = false;
f4d95c3c
MC
2207 netif_info(bp, drv, bp->dev,
2208 "Error recovery info: error recovery[0]\n");
7e914027 2209 break;
f4d95c3c 2210 }
1b2b9183 2211 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
7e914027
MC
2212 fw_health->tmr_multiplier =
2213 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2214 bp->current_interval * 10);
2215 fw_health->tmr_counter = fw_health->tmr_multiplier;
eca4cf12 2216 if (!fw_health->enabled)
1b2b9183
MC
2217 fw_health->last_fw_heartbeat =
2218 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
eca4cf12
MC
2219 fw_health->last_fw_reset_cnt =
2220 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
f4d95c3c
MC
2221 netif_info(bp, drv, bp->dev,
2222 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2223 fw_health->master, fw_health->last_fw_reset_cnt,
2224 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
1b2b9183
MC
2225 if (!fw_health->enabled) {
2226 /* Make sure tmr_counter is set and visible to
2227 * bnxt_health_check() before setting enabled to true.
2228 */
2229 smp_wmb();
2230 fw_health->enabled = true;
2231 }
7e914027
MC
2232 goto async_event_process_exit;
2233 }
a44daa8f 2234 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
871127e6
MC
2235 netif_notice(bp, hw, bp->dev,
2236 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2237 data1, data2);
a44daa8f 2238 goto async_event_process_exit;
8d4bd96b 2239 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
8d4bd96b
MC
2240 struct bnxt_rx_ring_info *rxr;
2241 u16 grp_idx;
2242
2243 if (bp->flags & BNXT_FLAG_CHIP_P5)
2244 goto async_event_process_exit;
2245
2246 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2247 BNXT_EVENT_RING_TYPE(data2), data1);
2248 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2249 goto async_event_process_exit;
2250
2251 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2252 if (grp_idx == INVALID_HW_RING_ID) {
2253 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2254 data1);
2255 goto async_event_process_exit;
2256 }
2257 rxr = bp->bnapi[grp_idx]->rx_ring;
2258 bnxt_sched_reset(bp, rxr);
2259 goto async_event_process_exit;
2260 }
df97b34d
MC
2261 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2262 struct bnxt_fw_health *fw_health = bp->fw_health;
2263
2264 netif_notice(bp, hw, bp->dev,
2265 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2266 data1, data2);
2267 if (fw_health) {
2268 fw_health->echo_req_data1 = data1;
2269 fw_health->echo_req_data2 = data2;
2270 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2271 break;
2272 }
2273 goto async_event_process_exit;
2274 }
099fdeda
PC
2275 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2276 bnxt_ptp_pps_event(bp, data1, data2);
abf90ac2
PC
2277 goto async_event_process_exit;
2278 }
2279 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2280 bnxt_event_error_report(bp, data1, data2);
099fdeda
PC
2281 goto async_event_process_exit;
2282 }
68f684e2
EP
2283 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2284 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2285
2286 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2287 goto async_event_process_exit;
2288 }
c0c050c5 2289 default:
19241368 2290 goto async_event_process_exit;
c0c050c5 2291 }
c213eae8 2292 bnxt_queue_sp_work(bp);
19241368 2293async_event_process_exit:
a588e458 2294 bnxt_ulp_async_events(bp, cmpl);
c0c050c5
MC
2295 return 0;
2296}
2297
2298static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2299{
2300 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2301 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2302 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2303 (struct hwrm_fwd_req_cmpl *)txcmp;
2304
2305 switch (cmpl_type) {
2306 case CMPL_BASE_TYPE_HWRM_DONE:
2307 seq_id = le16_to_cpu(h_cmpl->sequence_id);
68f684e2 2308 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
c0c050c5
MC
2309 break;
2310
2311 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2312 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2313
2314 if ((vf_id < bp->pf.first_vf_id) ||
2315 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2316 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2317 vf_id);
2318 return -EINVAL;
2319 }
2320
2321 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2322 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
c213eae8 2323 bnxt_queue_sp_work(bp);
c0c050c5
MC
2324 break;
2325
2326 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2327 bnxt_async_event_process(bp,
2328 (struct hwrm_async_event_cmpl *)txcmp);
cc9fd180 2329 break;
c0c050c5
MC
2330
2331 default:
2332 break;
2333 }
2334
2335 return 0;
2336}
2337
2338static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2339{
2340 struct bnxt_napi *bnapi = dev_instance;
2341 struct bnxt *bp = bnapi->bp;
2342 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2343 u32 cons = RING_CMP(cpr->cp_raw_cons);
2344
6a8788f2 2345 cpr->event_ctr++;
c0c050c5
MC
2346 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2347 napi_schedule(&bnapi->napi);
2348 return IRQ_HANDLED;
2349}
2350
2351static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2352{
2353 u32 raw_cons = cpr->cp_raw_cons;
2354 u16 cons = RING_CMP(raw_cons);
2355 struct tx_cmp *txcmp;
2356
2357 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2358
2359 return TX_CMP_VALID(txcmp, raw_cons);
2360}
2361
c0c050c5
MC
2362static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2363{
2364 struct bnxt_napi *bnapi = dev_instance;
2365 struct bnxt *bp = bnapi->bp;
2366 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2367 u32 cons = RING_CMP(cpr->cp_raw_cons);
2368 u32 int_status;
2369
2370 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2371
2372 if (!bnxt_has_work(bp, cpr)) {
11809490 2373 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
c0c050c5
MC
2374 /* return if erroneous interrupt */
2375 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2376 return IRQ_NONE;
2377 }
2378
2379 /* disable ring IRQ */
697197e5 2380 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
c0c050c5
MC
2381
2382 /* Return here if interrupt is shared and is disabled. */
2383 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2384 return IRQ_HANDLED;
2385
2386 napi_schedule(&bnapi->napi);
2387 return IRQ_HANDLED;
2388}
2389
3675b92f
MC
2390static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2391 int budget)
c0c050c5 2392{
e44758b7 2393 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5
MC
2394 u32 raw_cons = cpr->cp_raw_cons;
2395 u32 cons;
2396 int tx_pkts = 0;
2397 int rx_pkts = 0;
4e5dbbda 2398 u8 event = 0;
c0c050c5
MC
2399 struct tx_cmp *txcmp;
2400
0fcec985 2401 cpr->has_more_work = 0;
340ac85e 2402 cpr->had_work_done = 1;
c0c050c5
MC
2403 while (1) {
2404 int rc;
2405
2406 cons = RING_CMP(raw_cons);
2407 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2408
2409 if (!TX_CMP_VALID(txcmp, raw_cons))
2410 break;
2411
67a95e20
MC
2412 /* The valid test of the entry must be done first before
2413 * reading any further.
2414 */
b67daab0 2415 dma_rmb();
c0c050c5
MC
2416 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2417 tx_pkts++;
2418 /* return full budget so NAPI will complete. */
5bed8b07 2419 if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
c0c050c5 2420 rx_pkts = budget;
73f21c65 2421 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985
MC
2422 if (budget)
2423 cpr->has_more_work = 1;
73f21c65
MC
2424 break;
2425 }
c0c050c5 2426 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2270bc5d 2427 if (likely(budget))
e44758b7 2428 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2270bc5d 2429 else
e44758b7 2430 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2270bc5d 2431 &event);
c0c050c5
MC
2432 if (likely(rc >= 0))
2433 rx_pkts += rc;
903649e7
MC
2434 /* Increment rx_pkts when rc is -ENOMEM to count towards
2435 * the NAPI budget. Otherwise, we may potentially loop
2436 * here forever if we consistently cannot allocate
2437 * buffers.
2438 */
2edbdb31 2439 else if (rc == -ENOMEM && budget)
903649e7 2440 rx_pkts++;
c0c050c5
MC
2441 else if (rc == -EBUSY) /* partial completion */
2442 break;
c0c050c5
MC
2443 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2444 CMPL_BASE_TYPE_HWRM_DONE) ||
2445 (TX_CMP_TYPE(txcmp) ==
2446 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2447 (TX_CMP_TYPE(txcmp) ==
2448 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2449 bnxt_hwrm_handler(bp, txcmp);
2450 }
2451 raw_cons = NEXT_RAW_CMP(raw_cons);
2452
0fcec985
MC
2453 if (rx_pkts && rx_pkts == budget) {
2454 cpr->has_more_work = 1;
c0c050c5 2455 break;
0fcec985 2456 }
c0c050c5
MC
2457 }
2458
f18c2b77
AG
2459 if (event & BNXT_REDIRECT_EVENT)
2460 xdp_do_flush_map();
2461
38413406
MC
2462 if (event & BNXT_TX_EVENT) {
2463 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
38413406
MC
2464 u16 prod = txr->tx_prod;
2465
2466 /* Sync BD data before updating doorbell */
2467 wmb();
2468
697197e5 2469 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
38413406
MC
2470 }
2471
c0c050c5 2472 cpr->cp_raw_cons = raw_cons;
3675b92f
MC
2473 bnapi->tx_pkts += tx_pkts;
2474 bnapi->events |= event;
2475 return rx_pkts;
2476}
c0c050c5 2477
3675b92f
MC
2478static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2479{
2480 if (bnapi->tx_pkts) {
2481 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2482 bnapi->tx_pkts = 0;
2483 }
c0c050c5 2484
8fbf58e1 2485 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
b6ab4b01 2486 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 2487
3675b92f 2488 if (bnapi->events & BNXT_AGG_EVENT)
697197e5 2489 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 2490 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
c0c050c5 2491 }
3675b92f
MC
2492 bnapi->events = 0;
2493}
2494
2495static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2496 int budget)
2497{
2498 struct bnxt_napi *bnapi = cpr->bnapi;
2499 int rx_pkts;
2500
2501 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2502
2503 /* ACK completion ring before freeing tx ring and producing new
2504 * buffers in rx/agg rings to prevent overflowing the completion
2505 * ring.
2506 */
2507 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2508
2509 __bnxt_poll_work_done(bp, bnapi);
c0c050c5
MC
2510 return rx_pkts;
2511}
2512
10bbdaf5
PS
2513static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2514{
2515 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2516 struct bnxt *bp = bnapi->bp;
2517 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2518 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2519 struct tx_cmp *txcmp;
2520 struct rx_cmp_ext *rxcmp1;
2521 u32 cp_cons, tmp_raw_cons;
2522 u32 raw_cons = cpr->cp_raw_cons;
2523 u32 rx_pkts = 0;
4e5dbbda 2524 u8 event = 0;
10bbdaf5
PS
2525
2526 while (1) {
2527 int rc;
2528
2529 cp_cons = RING_CMP(raw_cons);
2530 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2531
2532 if (!TX_CMP_VALID(txcmp, raw_cons))
2533 break;
2534
828affc2
MC
2535 /* The valid test of the entry must be done first before
2536 * reading any further.
2537 */
2538 dma_rmb();
10bbdaf5
PS
2539 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2540 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2541 cp_cons = RING_CMP(tmp_raw_cons);
2542 rxcmp1 = (struct rx_cmp_ext *)
2543 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2544
2545 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2546 break;
2547
2548 /* force an error to recycle the buffer */
2549 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2550 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2551
e44758b7 2552 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2edbdb31 2553 if (likely(rc == -EIO) && budget)
10bbdaf5
PS
2554 rx_pkts++;
2555 else if (rc == -EBUSY) /* partial completion */
2556 break;
2557 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2558 CMPL_BASE_TYPE_HWRM_DONE)) {
2559 bnxt_hwrm_handler(bp, txcmp);
2560 } else {
2561 netdev_err(bp->dev,
2562 "Invalid completion received on special ring\n");
2563 }
2564 raw_cons = NEXT_RAW_CMP(raw_cons);
2565
2566 if (rx_pkts == budget)
2567 break;
2568 }
2569
2570 cpr->cp_raw_cons = raw_cons;
697197e5
MC
2571 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2572 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10bbdaf5 2573
434c975a 2574 if (event & BNXT_AGG_EVENT)
697197e5 2575 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10bbdaf5
PS
2576
2577 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
6ad20165 2578 napi_complete_done(napi, rx_pkts);
697197e5 2579 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
10bbdaf5
PS
2580 }
2581 return rx_pkts;
2582}
2583
c0c050c5
MC
2584static int bnxt_poll(struct napi_struct *napi, int budget)
2585{
2586 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2587 struct bnxt *bp = bnapi->bp;
2588 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2589 int work_done = 0;
2590
0da65f49
MC
2591 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2592 napi_complete(napi);
2593 return 0;
2594 }
c0c050c5 2595 while (1) {
e44758b7 2596 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
c0c050c5 2597
73f21c65
MC
2598 if (work_done >= budget) {
2599 if (!budget)
697197e5 2600 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5 2601 break;
73f21c65 2602 }
c0c050c5
MC
2603
2604 if (!bnxt_has_work(bp, cpr)) {
e7b95691 2605 if (napi_complete_done(napi, work_done))
697197e5 2606 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5
MC
2607 break;
2608 }
2609 }
6a8788f2 2610 if (bp->flags & BNXT_FLAG_DIM) {
f06d0ca4 2611 struct dim_sample dim_sample = {};
6a8788f2 2612
8960b389
TG
2613 dim_update_sample(cpr->event_ctr,
2614 cpr->rx_packets,
2615 cpr->rx_bytes,
2616 &dim_sample);
6a8788f2
AG
2617 net_dim(&cpr->dim, dim_sample);
2618 }
c0c050c5
MC
2619 return work_done;
2620}
2621
0fcec985
MC
2622static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2623{
2624 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2625 int i, work_done = 0;
2626
2627 for (i = 0; i < 2; i++) {
2628 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2629
2630 if (cpr2) {
2631 work_done += __bnxt_poll_work(bp, cpr2,
2632 budget - work_done);
2633 cpr->has_more_work |= cpr2->has_more_work;
2634 }
2635 }
2636 return work_done;
2637}
2638
2639static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
340ac85e 2640 u64 dbr_type)
0fcec985
MC
2641{
2642 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2643 int i;
2644
2645 for (i = 0; i < 2; i++) {
2646 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2647 struct bnxt_db_info *db;
2648
340ac85e 2649 if (cpr2 && cpr2->had_work_done) {
0fcec985 2650 db = &cpr2->cp_db;
c6132f6f
MC
2651 bnxt_writeq(bp, db->db_key64 | dbr_type |
2652 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
0fcec985
MC
2653 cpr2->had_work_done = 0;
2654 }
2655 }
2656 __bnxt_poll_work_done(bp, bnapi);
2657}
2658
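/* On P5 chips the NAPI handler polls the notification queue (NQ): each
 * CQ_NOTIFICATION entry selects one of the per-NAPI completion sub-rings
 * (RX or TX) via cq_handle_low, and that sub-ring is polled with
 * __bnxt_poll_work().  Serviced sub-rings are acknowledged with
 * DBR_TYPE_CQ doorbells, or DBR_TYPE_CQ_ARMALL when the NQ is re-armed at
 * the end of the poll.
 */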
2659static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2660{
2661 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2662 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2663 u32 raw_cons = cpr->cp_raw_cons;
2664 struct bnxt *bp = bnapi->bp;
2665 struct nqe_cn *nqcmp;
2666 int work_done = 0;
2667 u32 cons;
2668
0da65f49
MC
2669 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2670 napi_complete(napi);
2671 return 0;
2672 }
0fcec985
MC
2673 if (cpr->has_more_work) {
2674 cpr->has_more_work = 0;
2675 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
0fcec985
MC
2676 }
2677 while (1) {
2678 cons = RING_CMP(raw_cons);
2679 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2680
2681 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
54a9062f
MC
2682 if (cpr->has_more_work)
2683 break;
2684
340ac85e 2685 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
0fcec985
MC
2686 cpr->cp_raw_cons = raw_cons;
2687 if (napi_complete_done(napi, work_done))
2688 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2689 cpr->cp_raw_cons);
2690 return work_done;
2691 }
2692
2693 /* The valid test of the entry must be done first before
2694 * reading any further.
2695 */
2696 dma_rmb();
2697
2698 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2699 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2700 struct bnxt_cp_ring_info *cpr2;
2701
2702 cpr2 = cpr->cp_ring_arr[idx];
2703 work_done += __bnxt_poll_work(bp, cpr2,
2704 budget - work_done);
54a9062f 2705 cpr->has_more_work |= cpr2->has_more_work;
0fcec985
MC
2706 } else {
2707 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2708 }
2709 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985 2710 }
340ac85e 2711 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
389a877a
MC
2712 if (raw_cons != cpr->cp_raw_cons) {
2713 cpr->cp_raw_cons = raw_cons;
2714 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2715 }
0fcec985
MC
2716 return work_done;
2717}
2718
c0c050c5
MC
2719static void bnxt_free_tx_skbs(struct bnxt *bp)
2720{
2721 int i, max_idx;
2722 struct pci_dev *pdev = bp->pdev;
2723
b6ab4b01 2724 if (!bp->tx_ring)
c0c050c5
MC
2725 return;
2726
2727 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2728 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2729 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2730 int j;
2731
1affc01f
EP
2732 if (!txr->tx_buf_ring)
2733 continue;
2734
c0c050c5
MC
2735 for (j = 0; j < max_idx;) {
2736 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
f18c2b77 2737 struct sk_buff *skb;
c0c050c5
MC
2738 int k, last;
2739
f18c2b77
AG
2740 if (i < bp->tx_nr_rings_xdp &&
2741 tx_buf->action == XDP_REDIRECT) {
2742 dma_unmap_single(&pdev->dev,
2743 dma_unmap_addr(tx_buf, mapping),
2744 dma_unmap_len(tx_buf, len),
df70303d 2745 DMA_TO_DEVICE);
f18c2b77
AG
2746 xdp_return_frame(tx_buf->xdpf);
2747 tx_buf->action = 0;
2748 tx_buf->xdpf = NULL;
2749 j++;
2750 continue;
2751 }
2752
2753 skb = tx_buf->skb;
c0c050c5
MC
2754 if (!skb) {
2755 j++;
2756 continue;
2757 }
2758
2759 tx_buf->skb = NULL;
2760
2761 if (tx_buf->is_push) {
2762 dev_kfree_skb(skb);
2763 j += 2;
2764 continue;
2765 }
2766
2767 dma_unmap_single(&pdev->dev,
2768 dma_unmap_addr(tx_buf, mapping),
2769 skb_headlen(skb),
df70303d 2770 DMA_TO_DEVICE);
c0c050c5
MC
2771
2772 last = tx_buf->nr_frags;
2773 j += 2;
d612a579
MC
2774 for (k = 0; k < last; k++, j++) {
2775 int ring_idx = j & bp->tx_ring_mask;
c0c050c5
MC
2776 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2777
d612a579 2778 tx_buf = &txr->tx_buf_ring[ring_idx];
c0c050c5
MC
2779 dma_unmap_page(
2780 &pdev->dev,
2781 dma_unmap_addr(tx_buf, mapping),
df70303d 2782 skb_frag_size(frag), DMA_TO_DEVICE);
c0c050c5
MC
2783 }
2784 dev_kfree_skb(skb);
2785 }
2786 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2787 }
2788}
2789
975bc99a 2790static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
c0c050c5 2791{
975bc99a 2792 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
c0c050c5 2793 struct pci_dev *pdev = bp->pdev;
975bc99a
MC
2794 struct bnxt_tpa_idx_map *map;
2795 int i, max_idx, max_agg_idx;
c0c050c5
MC
2796
2797 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2798 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
975bc99a
MC
2799 if (!rxr->rx_tpa)
2800 goto skip_rx_tpa_free;
c0c050c5 2801
975bc99a
MC
2802 for (i = 0; i < bp->max_tpa; i++) {
2803 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2804 u8 *data = tpa_info->data;
c0c050c5 2805
975bc99a
MC
2806 if (!data)
2807 continue;
c0c050c5 2808
975bc99a
MC
2809 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2810 bp->rx_buf_use_size, bp->rx_dir,
2811 DMA_ATTR_WEAK_ORDERING);
c0c050c5 2812
975bc99a 2813 tpa_info->data = NULL;
c0c050c5 2814
975bc99a
MC
2815 kfree(data);
2816 }
c0c050c5 2817
975bc99a 2818skip_rx_tpa_free:
1affc01f
EP
2819 if (!rxr->rx_buf_ring)
2820 goto skip_rx_buf_free;
2821
975bc99a
MC
2822 for (i = 0; i < max_idx; i++) {
2823 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2824 dma_addr_t mapping = rx_buf->mapping;
2825 void *data = rx_buf->data;
c0c050c5 2826
975bc99a
MC
2827 if (!data)
2828 continue;
c0c050c5 2829
975bc99a
MC
2830 rx_buf->data = NULL;
2831 if (BNXT_RX_PAGE_MODE(bp)) {
2832 mapping -= bp->rx_dma_offset;
2833 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2834 bp->rx_dir,
2835 DMA_ATTR_WEAK_ORDERING);
2836 page_pool_recycle_direct(rxr->page_pool, data);
2837 } else {
2838 dma_unmap_single_attrs(&pdev->dev, mapping,
2839 bp->rx_buf_use_size, bp->rx_dir,
2840 DMA_ATTR_WEAK_ORDERING);
2841 kfree(data);
c0c050c5 2842 }
975bc99a 2843 }
1affc01f
EP
2844
2845skip_rx_buf_free:
2846 if (!rxr->rx_agg_ring)
2847 goto skip_rx_agg_free;
2848
975bc99a
MC
2849 for (i = 0; i < max_agg_idx; i++) {
2850 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2851 struct page *page = rx_agg_buf->page;
c0c050c5 2852
975bc99a
MC
2853 if (!page)
2854 continue;
c0c050c5 2855
975bc99a 2856 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
df70303d 2857 BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
975bc99a 2858 DMA_ATTR_WEAK_ORDERING);
c0c050c5 2859
975bc99a
MC
2860 rx_agg_buf->page = NULL;
2861 __clear_bit(i, rxr->rx_agg_bmap);
c0c050c5 2862
975bc99a
MC
2863 __free_page(page);
2864 }
1affc01f
EP
2865
2866skip_rx_agg_free:
975bc99a
MC
2867 if (rxr->rx_page) {
2868 __free_page(rxr->rx_page);
2869 rxr->rx_page = NULL;
c0c050c5 2870 }
975bc99a
MC
2871 map = rxr->rx_tpa_idx_map;
2872 if (map)
2873 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2874}
2875
2876static void bnxt_free_rx_skbs(struct bnxt *bp)
2877{
2878 int i;
2879
2880 if (!bp->rx_ring)
2881 return;
2882
2883 for (i = 0; i < bp->rx_nr_rings; i++)
2884 bnxt_free_one_rx_ring_skbs(bp, i);
c0c050c5
MC
2885}
2886
2887static void bnxt_free_skbs(struct bnxt *bp)
2888{
2889 bnxt_free_tx_skbs(bp);
2890 bnxt_free_rx_skbs(bp);
2891}
2892
41435c39
MC
2893static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2894{
2895 u8 init_val = mem_init->init_val;
2896 u16 offset = mem_init->offset;
2897 u8 *p2 = p;
2898 int i;
2899
2900 if (!init_val)
2901 return;
2902 if (offset == BNXT_MEM_INVALID_OFFSET) {
2903 memset(p, init_val, len);
2904 return;
2905 }
2906 for (i = 0; i < len; i += mem_init->size)
2907 *(p2 + i + offset) = init_val;
2908}
2909
6fe19886 2910static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5
MC
2911{
2912 struct pci_dev *pdev = bp->pdev;
2913 int i;
2914
985941e1
MC
2915 if (!rmem->pg_arr)
2916 goto skip_pages;
2917
6fe19886
MC
2918 for (i = 0; i < rmem->nr_pages; i++) {
2919 if (!rmem->pg_arr[i])
c0c050c5
MC
2920 continue;
2921
6fe19886
MC
2922 dma_free_coherent(&pdev->dev, rmem->page_size,
2923 rmem->pg_arr[i], rmem->dma_arr[i]);
c0c050c5 2924
6fe19886 2925 rmem->pg_arr[i] = NULL;
c0c050c5 2926 }
985941e1 2927skip_pages:
6fe19886 2928 if (rmem->pg_tbl) {
4f49b2b8
MC
2929 size_t pg_tbl_size = rmem->nr_pages * 8;
2930
2931 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2932 pg_tbl_size = rmem->page_size;
2933 dma_free_coherent(&pdev->dev, pg_tbl_size,
6fe19886
MC
2934 rmem->pg_tbl, rmem->pg_tbl_map);
2935 rmem->pg_tbl = NULL;
c0c050c5 2936 }
6fe19886
MC
2937 if (rmem->vmem_size && *rmem->vmem) {
2938 vfree(*rmem->vmem);
2939 *rmem->vmem = NULL;
c0c050c5
MC
2940 }
2941}
2942
6fe19886 2943static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5 2944{
c0c050c5 2945 struct pci_dev *pdev = bp->pdev;
66cca20a 2946 u64 valid_bit = 0;
6fe19886 2947 int i;
c0c050c5 2948
66cca20a
MC
2949 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2950 valid_bit = PTU_PTE_VALID;
4f49b2b8
MC
2951 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2952 size_t pg_tbl_size = rmem->nr_pages * 8;
2953
2954 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2955 pg_tbl_size = rmem->page_size;
2956 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
6fe19886 2957 &rmem->pg_tbl_map,
c0c050c5 2958 GFP_KERNEL);
6fe19886 2959 if (!rmem->pg_tbl)
c0c050c5
MC
2960 return -ENOMEM;
2961 }
2962
6fe19886 2963 for (i = 0; i < rmem->nr_pages; i++) {
66cca20a
MC
2964 u64 extra_bits = valid_bit;
2965
6fe19886
MC
2966 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2967 rmem->page_size,
2968 &rmem->dma_arr[i],
c0c050c5 2969 GFP_KERNEL);
6fe19886 2970 if (!rmem->pg_arr[i])
c0c050c5
MC
2971 return -ENOMEM;
2972
41435c39
MC
2973 if (rmem->mem_init)
2974 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2975 rmem->page_size);
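		/* Rings spanning multiple pages (or context memory with
		 * depth > 0) are described to the chip through a page table:
		 * each entry holds the DMA address of one page, tagged with
		 * PTU_PTE_VALID when the VALID/RING PTE flags are set, and
		 * ring memory additionally marks the last two entries with
		 * PTU_PTE_NEXT_TO_LAST and PTU_PTE_LAST.
		 */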
4f49b2b8 2976 if (rmem->nr_pages > 1 || rmem->depth > 0) {
66cca20a
MC
2977 if (i == rmem->nr_pages - 2 &&
2978 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2979 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2980 else if (i == rmem->nr_pages - 1 &&
2981 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2982 extra_bits |= PTU_PTE_LAST;
2983 rmem->pg_tbl[i] =
2984 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2985 }
c0c050c5
MC
2986 }
2987
6fe19886
MC
2988 if (rmem->vmem_size) {
2989 *rmem->vmem = vzalloc(rmem->vmem_size);
2990 if (!(*rmem->vmem))
c0c050c5
MC
2991 return -ENOMEM;
2992 }
2993 return 0;
2994}
2995
4a228a3a
MC
2996static void bnxt_free_tpa_info(struct bnxt *bp)
2997{
2998 int i;
2999
3000 for (i = 0; i < bp->rx_nr_rings; i++) {
3001 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3002
ec4d8e7c
MC
3003 kfree(rxr->rx_tpa_idx_map);
3004 rxr->rx_tpa_idx_map = NULL;
79632e9b
MC
3005 if (rxr->rx_tpa) {
3006 kfree(rxr->rx_tpa[0].agg_arr);
3007 rxr->rx_tpa[0].agg_arr = NULL;
3008 }
4a228a3a
MC
3009 kfree(rxr->rx_tpa);
3010 rxr->rx_tpa = NULL;
3011 }
3012}
3013
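/* On P5 chips one aggregation completion array of max_tpa * MAX_SKB_FRAGS
 * entries is allocated per RX ring and carved into per-TPA slices, and
 * rx_tpa_idx_map maps the agg IDs reported in TPA completions to rx_tpa[]
 * slots (see bnxt_lookup_agg_idx()).
 */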
3014static int bnxt_alloc_tpa_info(struct bnxt *bp)
3015{
79632e9b
MC
3016 int i, j, total_aggs = 0;
3017
3018 bp->max_tpa = MAX_TPA;
3019 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3020 if (!bp->max_tpa_v2)
3021 return 0;
3022 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3023 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
3024 }
4a228a3a
MC
3025
3026 for (i = 0; i < bp->rx_nr_rings; i++) {
3027 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
79632e9b 3028 struct rx_agg_cmp *agg;
4a228a3a 3029
79632e9b 3030 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
4a228a3a
MC
3031 GFP_KERNEL);
3032 if (!rxr->rx_tpa)
3033 return -ENOMEM;
79632e9b
MC
3034
3035 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3036 continue;
3037 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3038 rxr->rx_tpa[0].agg_arr = agg;
3039 if (!agg)
3040 return -ENOMEM;
3041 for (j = 1; j < bp->max_tpa; j++)
3042 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
ec4d8e7c
MC
3043 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3044 GFP_KERNEL);
3045 if (!rxr->rx_tpa_idx_map)
3046 return -ENOMEM;
4a228a3a
MC
3047 }
3048 return 0;
3049}
3050
c0c050c5
MC
3051static void bnxt_free_rx_rings(struct bnxt *bp)
3052{
3053 int i;
3054
b6ab4b01 3055 if (!bp->rx_ring)
c0c050c5
MC
3056 return;
3057
4a228a3a 3058 bnxt_free_tpa_info(bp);
c0c050c5 3059 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3060 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
3061 struct bnxt_ring_struct *ring;
3062
c6d30e83
MC
3063 if (rxr->xdp_prog)
3064 bpf_prog_put(rxr->xdp_prog);
3065
96a8604f
JDB
3066 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3067 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3068
12479f62 3069 page_pool_destroy(rxr->page_pool);
322b87ca
AG
3070 rxr->page_pool = NULL;
3071
c0c050c5
MC
3072 kfree(rxr->rx_agg_bmap);
3073 rxr->rx_agg_bmap = NULL;
3074
3075 ring = &rxr->rx_ring_struct;
6fe19886 3076 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3077
3078 ring = &rxr->rx_agg_ring_struct;
6fe19886 3079 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3080 }
3081}
3082
322b87ca
AG
3083static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3084 struct bnxt_rx_ring_info *rxr)
3085{
3086 struct page_pool_params pp = { 0 };
3087
3088 pp.pool_size = bp->rx_ring_size;
3089 pp.nid = dev_to_node(&bp->pdev->dev);
3090 pp.dev = &bp->pdev->dev;
3091 pp.dma_dir = DMA_BIDIRECTIONAL;
3092
3093 rxr->page_pool = page_pool_create(&pp);
3094 if (IS_ERR(rxr->page_pool)) {
3095 int err = PTR_ERR(rxr->page_pool);
3096
3097 rxr->page_pool = NULL;
3098 return err;
3099 }
3100 return 0;
3101}
3102
c0c050c5
MC
3103static int bnxt_alloc_rx_rings(struct bnxt *bp)
3104{
4a228a3a 3105 int i, rc = 0, agg_rings = 0;
c0c050c5 3106
b6ab4b01
MC
3107 if (!bp->rx_ring)
3108 return -ENOMEM;
3109
c0c050c5
MC
3110 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3111 agg_rings = 1;
3112
c0c050c5 3113 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3114 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
3115 struct bnxt_ring_struct *ring;
3116
c0c050c5
MC
3117 ring = &rxr->rx_ring_struct;
3118
322b87ca
AG
3119 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3120 if (rc)
3121 return rc;
3122
b02e5a0e 3123 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
12479f62 3124 if (rc < 0)
96a8604f
JDB
3125 return rc;
3126
f18c2b77 3127 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
322b87ca
AG
3128 MEM_TYPE_PAGE_POOL,
3129 rxr->page_pool);
f18c2b77
AG
3130 if (rc) {
3131 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3132 return rc;
3133 }
3134
6fe19886 3135 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3136 if (rc)
3137 return rc;
3138
2c61d211 3139 ring->grp_idx = i;
c0c050c5
MC
3140 if (agg_rings) {
3141 u16 mem_size;
3142
3143 ring = &rxr->rx_agg_ring_struct;
6fe19886 3144 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3145 if (rc)
3146 return rc;
3147
9899bb59 3148 ring->grp_idx = i;
c0c050c5
MC
3149 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3150 mem_size = rxr->rx_agg_bmap_size / 8;
3151 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3152 if (!rxr->rx_agg_bmap)
3153 return -ENOMEM;
c0c050c5
MC
3154 }
3155 }
4a228a3a
MC
3156 if (bp->flags & BNXT_FLAG_TPA)
3157 rc = bnxt_alloc_tpa_info(bp);
3158 return rc;
c0c050c5
MC
3159}
3160
3161static void bnxt_free_tx_rings(struct bnxt *bp)
3162{
3163 int i;
3164 struct pci_dev *pdev = bp->pdev;
3165
b6ab4b01 3166 if (!bp->tx_ring)
c0c050c5
MC
3167 return;
3168
3169 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3170 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3171 struct bnxt_ring_struct *ring;
3172
c0c050c5
MC
3173 if (txr->tx_push) {
3174 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3175 txr->tx_push, txr->tx_push_mapping);
3176 txr->tx_push = NULL;
3177 }
3178
3179 ring = &txr->tx_ring_struct;
3180
6fe19886 3181 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3182 }
3183}
3184
3185static int bnxt_alloc_tx_rings(struct bnxt *bp)
3186{
3187 int i, j, rc;
3188 struct pci_dev *pdev = bp->pdev;
3189
3190 bp->tx_push_size = 0;
3191 if (bp->tx_push_thresh) {
3192 int push_size;
3193
3194 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3195 bp->tx_push_thresh);
3196
4419dbe6 3197 if (push_size > 256) {
c0c050c5
MC
3198 push_size = 0;
3199 bp->tx_push_thresh = 0;
3200 }
3201
3202 bp->tx_push_size = push_size;
3203 }
3204
3205 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3206 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5 3207 struct bnxt_ring_struct *ring;
2e8ef77e 3208 u8 qidx;
c0c050c5 3209
c0c050c5
MC
3210 ring = &txr->tx_ring_struct;
3211
6fe19886 3212 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3213 if (rc)
3214 return rc;
3215
9899bb59 3216 ring->grp_idx = txr->bnapi->index;
c0c050c5 3217 if (bp->tx_push_size) {
c0c050c5
MC
3218 dma_addr_t mapping;
3219
3220 /* One pre-allocated DMA buffer to back up
3221 * the TX push operation
3222 */
3223 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3224 bp->tx_push_size,
3225 &txr->tx_push_mapping,
3226 GFP_KERNEL);
3227
3228 if (!txr->tx_push)
3229 return -ENOMEM;
3230
c0c050c5
MC
3231 mapping = txr->tx_push_mapping +
3232 sizeof(struct tx_push_bd);
4419dbe6 3233 txr->data_mapping = cpu_to_le64(mapping);
c0c050c5 3234 }
2e8ef77e
MC
3235 qidx = bp->tc_to_qidx[j];
3236 ring->queue_id = bp->q_info[qidx].queue_id;
5f449249
MC
3237 if (i < bp->tx_nr_rings_xdp)
3238 continue;
c0c050c5
MC
3239 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3240 j++;
3241 }
3242 return 0;
3243}
3244
03c74487
MC
3245static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3246{
985941e1
MC
3247 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3248
03c74487
MC
3249 kfree(cpr->cp_desc_ring);
3250 cpr->cp_desc_ring = NULL;
985941e1 3251 ring->ring_mem.pg_arr = NULL;
03c74487
MC
3252 kfree(cpr->cp_desc_mapping);
3253 cpr->cp_desc_mapping = NULL;
985941e1 3254 ring->ring_mem.dma_arr = NULL;
03c74487
MC
3255}
3256
3257static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3258{
3259 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3260 if (!cpr->cp_desc_ring)
3261 return -ENOMEM;
3262 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3263 GFP_KERNEL);
3264 if (!cpr->cp_desc_mapping)
3265 return -ENOMEM;
3266 return 0;
3267}
3268
3269static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3270{
3271 int i;
3272
3273 if (!bp->bnapi)
3274 return;
3275 for (i = 0; i < bp->cp_nr_rings; i++) {
3276 struct bnxt_napi *bnapi = bp->bnapi[i];
3277
3278 if (!bnapi)
3279 continue;
3280 bnxt_free_cp_arrays(&bnapi->cp_ring);
3281 }
3282}
3283
3284static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3285{
3286 int i, n = bp->cp_nr_pages;
3287
3288 for (i = 0; i < bp->cp_nr_rings; i++) {
3289 struct bnxt_napi *bnapi = bp->bnapi[i];
3290 int rc;
3291
3292 if (!bnapi)
3293 continue;
3294 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3295 if (rc)
3296 return rc;
3297 }
3298 return 0;
3299}
3300
c0c050c5
MC
3301static void bnxt_free_cp_rings(struct bnxt *bp)
3302{
3303 int i;
3304
3305 if (!bp->bnapi)
3306 return;
3307
3308 for (i = 0; i < bp->cp_nr_rings; i++) {
3309 struct bnxt_napi *bnapi = bp->bnapi[i];
3310 struct bnxt_cp_ring_info *cpr;
3311 struct bnxt_ring_struct *ring;
50e3ab78 3312 int j;
c0c050c5
MC
3313
3314 if (!bnapi)
3315 continue;
3316
3317 cpr = &bnapi->cp_ring;
3318 ring = &cpr->cp_ring_struct;
3319
6fe19886 3320 bnxt_free_ring(bp, &ring->ring_mem);
50e3ab78
MC
3321
3322 for (j = 0; j < 2; j++) {
3323 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3324
3325 if (cpr2) {
3326 ring = &cpr2->cp_ring_struct;
3327 bnxt_free_ring(bp, &ring->ring_mem);
03c74487 3328 bnxt_free_cp_arrays(cpr2);
50e3ab78
MC
3329 kfree(cpr2);
3330 cpr->cp_ring_arr[j] = NULL;
3331 }
3332 }
c0c050c5
MC
3333 }
3334}
3335
50e3ab78
MC
3336static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3337{
3338 struct bnxt_ring_mem_info *rmem;
3339 struct bnxt_ring_struct *ring;
3340 struct bnxt_cp_ring_info *cpr;
3341 int rc;
3342
3343 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3344 if (!cpr)
3345 return NULL;
3346
03c74487
MC
3347 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3348 if (rc) {
3349 bnxt_free_cp_arrays(cpr);
3350 kfree(cpr);
3351 return NULL;
3352 }
50e3ab78
MC
3353 ring = &cpr->cp_ring_struct;
3354 rmem = &ring->ring_mem;
3355 rmem->nr_pages = bp->cp_nr_pages;
3356 rmem->page_size = HW_CMPD_RING_SIZE;
3357 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3358 rmem->dma_arr = cpr->cp_desc_mapping;
3359 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3360 rc = bnxt_alloc_ring(bp, rmem);
3361 if (rc) {
3362 bnxt_free_ring(bp, rmem);
03c74487 3363 bnxt_free_cp_arrays(cpr);
50e3ab78
MC
3364 kfree(cpr);
3365 cpr = NULL;
3366 }
3367 return cpr;
3368}
3369
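/* On P5 chips each NAPI completion ring acts as a notification queue with
 * up to two sub completion rings hanging off it: BNXT_RX_HDL for NAPIs
 * that own an RX ring and BNXT_TX_HDL for NAPIs that service TX,
 * depending on whether RX and TX share the same NAPI.  Older chips use
 * the single completion ring directly.
 */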
c0c050c5
MC
3370static int bnxt_alloc_cp_rings(struct bnxt *bp)
3371{
50e3ab78 3372 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
e5811b8c 3373 int i, rc, ulp_base_vec, ulp_msix;
c0c050c5 3374
e5811b8c
MC
3375 ulp_msix = bnxt_get_ulp_msix_num(bp);
3376 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
c0c050c5
MC
3377 for (i = 0; i < bp->cp_nr_rings; i++) {
3378 struct bnxt_napi *bnapi = bp->bnapi[i];
3379 struct bnxt_cp_ring_info *cpr;
3380 struct bnxt_ring_struct *ring;
3381
3382 if (!bnapi)
3383 continue;
3384
3385 cpr = &bnapi->cp_ring;
50e3ab78 3386 cpr->bnapi = bnapi;
c0c050c5
MC
3387 ring = &cpr->cp_ring_struct;
3388
6fe19886 3389 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3390 if (rc)
3391 return rc;
e5811b8c
MC
3392
3393 if (ulp_msix && i >= ulp_base_vec)
3394 ring->map_idx = i + ulp_msix;
3395 else
3396 ring->map_idx = i;
50e3ab78
MC
3397
3398 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3399 continue;
3400
3401 if (i < bp->rx_nr_rings) {
3402 struct bnxt_cp_ring_info *cpr2 =
3403 bnxt_alloc_cp_sub_ring(bp);
3404
3405 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3406 if (!cpr2)
3407 return -ENOMEM;
3408 cpr2->bnapi = bnapi;
3409 }
3410 if ((sh && i < bp->tx_nr_rings) ||
3411 (!sh && i >= bp->rx_nr_rings)) {
3412 struct bnxt_cp_ring_info *cpr2 =
3413 bnxt_alloc_cp_sub_ring(bp);
3414
3415 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3416 if (!cpr2)
3417 return -ENOMEM;
3418 cpr2->bnapi = bnapi;
3419 }
c0c050c5
MC
3420 }
3421 return 0;
3422}
3423
3424static void bnxt_init_ring_struct(struct bnxt *bp)
3425{
3426 int i;
3427
3428 for (i = 0; i < bp->cp_nr_rings; i++) {
3429 struct bnxt_napi *bnapi = bp->bnapi[i];
6fe19886 3430 struct bnxt_ring_mem_info *rmem;
c0c050c5
MC
3431 struct bnxt_cp_ring_info *cpr;
3432 struct bnxt_rx_ring_info *rxr;
3433 struct bnxt_tx_ring_info *txr;
3434 struct bnxt_ring_struct *ring;
3435
3436 if (!bnapi)
3437 continue;
3438
3439 cpr = &bnapi->cp_ring;
3440 ring = &cpr->cp_ring_struct;
6fe19886
MC
3441 rmem = &ring->ring_mem;
3442 rmem->nr_pages = bp->cp_nr_pages;
3443 rmem->page_size = HW_CMPD_RING_SIZE;
3444 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3445 rmem->dma_arr = cpr->cp_desc_mapping;
3446 rmem->vmem_size = 0;
c0c050c5 3447
b6ab4b01 3448 rxr = bnapi->rx_ring;
3b2b7d9d
MC
3449 if (!rxr)
3450 goto skip_rx;
3451
c0c050c5 3452 ring = &rxr->rx_ring_struct;
6fe19886
MC
3453 rmem = &ring->ring_mem;
3454 rmem->nr_pages = bp->rx_nr_pages;
3455 rmem->page_size = HW_RXBD_RING_SIZE;
3456 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3457 rmem->dma_arr = rxr->rx_desc_mapping;
3458 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3459 rmem->vmem = (void **)&rxr->rx_buf_ring;
c0c050c5
MC
3460
3461 ring = &rxr->rx_agg_ring_struct;
6fe19886
MC
3462 rmem = &ring->ring_mem;
3463 rmem->nr_pages = bp->rx_agg_nr_pages;
3464 rmem->page_size = HW_RXBD_RING_SIZE;
3465 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3466 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3467 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3468 rmem->vmem = (void **)&rxr->rx_agg_ring;
c0c050c5 3469
3b2b7d9d 3470skip_rx:
b6ab4b01 3471 txr = bnapi->tx_ring;
3b2b7d9d
MC
3472 if (!txr)
3473 continue;
3474
c0c050c5 3475 ring = &txr->tx_ring_struct;
6fe19886
MC
3476 rmem = &ring->ring_mem;
3477 rmem->nr_pages = bp->tx_nr_pages;
3478 rmem->page_size = HW_RXBD_RING_SIZE;
3479 rmem->pg_arr = (void **)txr->tx_desc_ring;
3480 rmem->dma_arr = txr->tx_desc_mapping;
3481 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3482 rmem->vmem = (void **)&txr->tx_buf_ring;
c0c050c5
MC
3483 }
3484}
3485
3486static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3487{
3488 int i;
3489 u32 prod;
3490 struct rx_bd **rx_buf_ring;
3491
6fe19886
MC
3492 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3493 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
c0c050c5
MC
3494 int j;
3495 struct rx_bd *rxbd;
3496
3497 rxbd = rx_buf_ring[i];
3498 if (!rxbd)
3499 continue;
3500
3501 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3502 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3503 rxbd->rx_bd_opaque = prod;
3504 }
3505 }
3506}
3507
7737d325 3508static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
c0c050c5 3509{
7737d325 3510 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
c0c050c5 3511 struct net_device *dev = bp->dev;
7737d325 3512 u32 prod;
c0c050c5
MC
3513 int i;
3514
c0c050c5
MC
3515 prod = rxr->rx_prod;
3516 for (i = 0; i < bp->rx_ring_size; i++) {
7737d325 3517 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
c0c050c5
MC
3518 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3519 ring_nr, i, bp->rx_ring_size);
3520 break;
3521 }
3522 prod = NEXT_RX(prod);
3523 }
3524 rxr->rx_prod = prod;
edd0c2cc 3525
c0c050c5
MC
3526 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3527 return 0;
3528
c0c050c5
MC
3529 prod = rxr->rx_agg_prod;
3530 for (i = 0; i < bp->rx_agg_ring_size; i++) {
7737d325 3531 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
c0c050c5
MC
3532 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
 3533				    ring_nr, i, bp->rx_agg_ring_size);
3534 break;
3535 }
3536 prod = NEXT_RX_AGG(prod);
3537 }
3538 rxr->rx_agg_prod = prod;
c0c050c5 3539
7737d325
MC
3540 if (rxr->rx_tpa) {
3541 dma_addr_t mapping;
3542 u8 *data;
c0c050c5 3543
7737d325
MC
3544 for (i = 0; i < bp->max_tpa; i++) {
3545 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3546 if (!data)
3547 return -ENOMEM;
c0c050c5 3548
7737d325
MC
3549 rxr->rx_tpa[i].data = data;
3550 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3551 rxr->rx_tpa[i].mapping = mapping;
c0c050c5
MC
3552 }
3553 }
c0c050c5
MC
3554 return 0;
3555}
3556
7737d325
MC
3557static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3558{
3559 struct bnxt_rx_ring_info *rxr;
3560 struct bnxt_ring_struct *ring;
3561 u32 type;
3562
3563 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3564 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3565
3566 if (NET_IP_ALIGN == 2)
3567 type |= RX_BD_FLAGS_SOP;
3568
3569 rxr = &bp->rx_ring[ring_nr];
3570 ring = &rxr->rx_ring_struct;
3571 bnxt_init_rxbd_pages(ring, type);
3572
3573 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3574 bpf_prog_add(bp->xdp_prog, 1);
3575 rxr->xdp_prog = bp->xdp_prog;
3576 }
3577 ring->fw_ring_id = INVALID_HW_RING_ID;
3578
3579 ring = &rxr->rx_agg_ring_struct;
3580 ring->fw_ring_id = INVALID_HW_RING_ID;
3581
3582 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3583 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3584 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3585
3586 bnxt_init_rxbd_pages(ring, type);
3587 }
3588
3589 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3590}
3591
2247925f
SP
3592static void bnxt_init_cp_rings(struct bnxt *bp)
3593{
3e08b184 3594 int i, j;
2247925f
SP
3595
3596 for (i = 0; i < bp->cp_nr_rings; i++) {
3597 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3598 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3599
3600 ring->fw_ring_id = INVALID_HW_RING_ID;
6a8788f2
AG
3601 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3602 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3e08b184
MC
3603 for (j = 0; j < 2; j++) {
3604 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3605
3606 if (!cpr2)
3607 continue;
3608
3609 ring = &cpr2->cp_ring_struct;
3610 ring->fw_ring_id = INVALID_HW_RING_ID;
3611 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3612 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3613 }
2247925f
SP
3614 }
3615}
3616
c0c050c5
MC
3617static int bnxt_init_rx_rings(struct bnxt *bp)
3618{
3619 int i, rc = 0;
3620
c61fb99c 3621 if (BNXT_RX_PAGE_MODE(bp)) {
c6d30e83
MC
3622 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3623 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
c61fb99c
MC
3624 } else {
3625 bp->rx_offset = BNXT_RX_OFFSET;
3626 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3627 }
b3dba77c 3628
c0c050c5
MC
3629 for (i = 0; i < bp->rx_nr_rings; i++) {
3630 rc = bnxt_init_one_rx_ring(bp, i);
3631 if (rc)
3632 break;
3633 }
3634
3635 return rc;
3636}
3637
3638static int bnxt_init_tx_rings(struct bnxt *bp)
3639{
3640 u16 i;
3641
3642 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
5bed8b07 3643 BNXT_MIN_TX_DESC_CNT);
c0c050c5
MC
3644
3645 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3646 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3647 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3648
3649 ring->fw_ring_id = INVALID_HW_RING_ID;
3650 }
3651
3652 return 0;
3653}
3654
3655static void bnxt_free_ring_grps(struct bnxt *bp)
3656{
3657 kfree(bp->grp_info);
3658 bp->grp_info = NULL;
3659}
3660
3661static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3662{
3663 int i;
3664
3665 if (irq_re_init) {
3666 bp->grp_info = kcalloc(bp->cp_nr_rings,
3667 sizeof(struct bnxt_ring_grp_info),
3668 GFP_KERNEL);
3669 if (!bp->grp_info)
3670 return -ENOMEM;
3671 }
3672 for (i = 0; i < bp->cp_nr_rings; i++) {
3673 if (irq_re_init)
3674 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3675 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3676 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3677 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3678 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3679 }
3680 return 0;
3681}
3682
3683static void bnxt_free_vnics(struct bnxt *bp)
3684{
3685 kfree(bp->vnic_info);
3686 bp->vnic_info = NULL;
3687 bp->nr_vnics = 0;
3688}
3689
3690static int bnxt_alloc_vnics(struct bnxt *bp)
3691{
3692 int num_vnics = 1;
3693
3694#ifdef CONFIG_RFS_ACCEL
9b3d15e6 3695 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
c0c050c5
MC
3696 num_vnics += bp->rx_nr_rings;
3697#endif
3698
dc52c6c7
PS
3699 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3700 num_vnics++;
3701
c0c050c5
MC
3702 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3703 GFP_KERNEL);
3704 if (!bp->vnic_info)
3705 return -ENOMEM;
3706
3707 bp->nr_vnics = num_vnics;
3708 return 0;
3709}
3710
3711static void bnxt_init_vnics(struct bnxt *bp)
3712{
3713 int i;
3714
3715 for (i = 0; i < bp->nr_vnics; i++) {
3716 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
44c6f72a 3717 int j;
c0c050c5
MC
3718
3719 vnic->fw_vnic_id = INVALID_HW_RING_ID;
44c6f72a
MC
3720 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3721 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3722
c0c050c5
MC
3723 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3724
3725 if (bp->vnic_info[i].rss_hash_key) {
3726 if (i == 0)
3727 prandom_bytes(vnic->rss_hash_key,
3728 HW_HASH_KEY_SIZE);
3729 else
3730 memcpy(vnic->rss_hash_key,
3731 bp->vnic_info[0].rss_hash_key,
3732 HW_HASH_KEY_SIZE);
3733 }
3734 }
3735}
3736
3737static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3738{
3739 int pages;
3740
3741 pages = ring_size / desc_per_pg;
3742
3743 if (!pages)
3744 return 1;
3745
3746 pages++;
3747
3748 while (pages & (pages - 1))
3749 pages++;
3750
3751 return pages;
3752}
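/* Editorial note, not part of the original source: a worked example of
 * bnxt_calc_nr_ring_pages(), assuming 4 KiB pages and 16-byte descriptors
 * so that desc_per_pg is 256. A 511-entry ring gives pages = 511 / 256 = 1,
 * the increment makes it 2, and 2 is already a power of two, so 2 pages are
 * used. A 1023-entry ring gives 3 + 1 = 4 pages. An exact multiple such as
 * 1024 gives 4 + 1 = 5, which the loop rounds up to 8, so ring sizes of the
 * form 2^n - 1 avoid doubling the page count.
 */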
3753
c6d30e83 3754void bnxt_set_tpa_flags(struct bnxt *bp)
c0c050c5
MC
3755{
3756 bp->flags &= ~BNXT_FLAG_TPA;
341138c3
MC
3757 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3758 return;
c0c050c5
MC
3759 if (bp->dev->features & NETIF_F_LRO)
3760 bp->flags |= BNXT_FLAG_LRO;
1054aee8 3761 else if (bp->dev->features & NETIF_F_GRO_HW)
c0c050c5
MC
3762 bp->flags |= BNXT_FLAG_GRO;
3763}
3764
3765/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3766 * be set on entry.
3767 */
3768void bnxt_set_ring_params(struct bnxt *bp)
3769{
27640ce6 3770 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
c0c050c5
MC
3771 u32 agg_factor = 0, agg_ring_size = 0;
3772
3773 /* 8 for CRC and VLAN */
3774 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3775
3776 rx_space = rx_size + NET_SKB_PAD +
3777 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3778
3779 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3780 ring_size = bp->rx_ring_size;
3781 bp->rx_agg_ring_size = 0;
3782 bp->rx_agg_nr_pages = 0;
3783
3784 if (bp->flags & BNXT_FLAG_TPA)
2839f28b 3785 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
c0c050c5
MC
3786
3787 bp->flags &= ~BNXT_FLAG_JUMBO;
bdbd1eb5 3788 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
c0c050c5
MC
3789 u32 jumbo_factor;
3790
3791 bp->flags |= BNXT_FLAG_JUMBO;
3792 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3793 if (jumbo_factor > agg_factor)
3794 agg_factor = jumbo_factor;
3795 }
c1129b51
MC
3796 if (agg_factor) {
3797 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3798 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3799 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3800 bp->rx_ring_size, ring_size);
3801 bp->rx_ring_size = ring_size;
3802 }
3803 agg_ring_size = ring_size * agg_factor;
c0c050c5 3804
c0c050c5
MC
3805 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3806 RX_DESC_CNT);
3807 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3808 u32 tmp = agg_ring_size;
3809
3810 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3811 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3812 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3813 tmp, agg_ring_size);
3814 }
3815 bp->rx_agg_ring_size = agg_ring_size;
3816 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3817 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3818 rx_space = rx_size + NET_SKB_PAD +
3819 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3820 }
3821
3822 bp->rx_buf_use_size = rx_size;
3823 bp->rx_buf_size = rx_space;
3824
3825 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3826 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3827
3828 ring_size = bp->tx_ring_size;
3829 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3830 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3831
27640ce6
MC
3832 max_rx_cmpl = bp->rx_ring_size;
3833 /* MAX TPA needs to be added because TPA_START completions are
3834 * immediately recycled, so the TPA completions are not bound by
3835 * the RX ring size.
3836 */
3837 if (bp->flags & BNXT_FLAG_TPA)
3838 max_rx_cmpl += bp->max_tpa;
3839 /* RX and TPA completions are 32-byte, all others are 16-byte */
3840 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
c0c050c5
MC
3841 bp->cp_ring_size = ring_size;
3842
3843 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3844 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3845 bp->cp_nr_pages = MAX_CP_PAGES;
3846 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3847 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3848 ring_size, bp->cp_ring_size);
3849 }
3850 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3851 bp->cp_ring_mask = bp->cp_bit - 1;
3852}
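/* Editorial note with illustrative numbers only: with a 511-entry RX ring,
 * a 511-entry TX ring, TPA enabled (agg_factor = 4 with 4 KiB RX pages) and
 * an assumed bp->max_tpa of 64, the code above computes
 * agg_ring_size = 511 * 4 = 2044, max_rx_cmpl = 511 + 64 = 575 and a
 * completion ring of 575 * 2 + 2044 + 511 = 3705 entries, which
 * bnxt_calc_nr_ring_pages() rounds up to 16 pages of 256 completion
 * descriptors each.
 */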
3853
96a8604f
JDB
3854/* Changing allocation mode of RX rings.
3855 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3856 */
c61fb99c 3857int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
6bb19474 3858{
c61fb99c
MC
3859 if (page_mode) {
3860 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3861 return -EOPNOTSUPP;
7eb9bb3a
MC
3862 bp->dev->max_mtu =
3863 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
c61fb99c
MC
3864 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3865 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
c61fb99c
MC
3866 bp->rx_dir = DMA_BIDIRECTIONAL;
3867 bp->rx_skb_func = bnxt_rx_page_skb;
1054aee8
MC
3868 /* Disable LRO or GRO_HW */
3869 netdev_update_features(bp->dev);
c61fb99c 3870 } else {
7eb9bb3a 3871 bp->dev->max_mtu = bp->max_mtu;
c61fb99c
MC
3872 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3873 bp->rx_dir = DMA_FROM_DEVICE;
3874 bp->rx_skb_func = bnxt_rx_skb;
3875 }
6bb19474
MC
3876 return 0;
3877}
3878
c0c050c5
MC
3879static void bnxt_free_vnic_attributes(struct bnxt *bp)
3880{
3881 int i;
3882 struct bnxt_vnic_info *vnic;
3883 struct pci_dev *pdev = bp->pdev;
3884
3885 if (!bp->vnic_info)
3886 return;
3887
3888 for (i = 0; i < bp->nr_vnics; i++) {
3889 vnic = &bp->vnic_info[i];
3890
3891 kfree(vnic->fw_grp_ids);
3892 vnic->fw_grp_ids = NULL;
3893
3894 kfree(vnic->uc_list);
3895 vnic->uc_list = NULL;
3896
3897 if (vnic->mc_list) {
3898 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3899 vnic->mc_list, vnic->mc_list_mapping);
3900 vnic->mc_list = NULL;
3901 }
3902
3903 if (vnic->rss_table) {
34370d24 3904 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
c0c050c5
MC
3905 vnic->rss_table,
3906 vnic->rss_table_dma_addr);
3907 vnic->rss_table = NULL;
3908 }
3909
3910 vnic->rss_hash_key = NULL;
3911 vnic->flags = 0;
3912 }
3913}
3914
3915static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3916{
3917 int i, rc = 0, size;
3918 struct bnxt_vnic_info *vnic;
3919 struct pci_dev *pdev = bp->pdev;
3920 int max_rings;
3921
3922 for (i = 0; i < bp->nr_vnics; i++) {
3923 vnic = &bp->vnic_info[i];
3924
3925 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3926 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3927
3928 if (mem_size > 0) {
3929 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3930 if (!vnic->uc_list) {
3931 rc = -ENOMEM;
3932 goto out;
3933 }
3934 }
3935 }
3936
3937 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3938 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3939 vnic->mc_list =
3940 dma_alloc_coherent(&pdev->dev,
3941 vnic->mc_list_size,
3942 &vnic->mc_list_mapping,
3943 GFP_KERNEL);
3944 if (!vnic->mc_list) {
3945 rc = -ENOMEM;
3946 goto out;
3947 }
3948 }
3949
44c6f72a
MC
3950 if (bp->flags & BNXT_FLAG_CHIP_P5)
3951 goto vnic_skip_grps;
3952
c0c050c5
MC
3953 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3954 max_rings = bp->rx_nr_rings;
3955 else
3956 max_rings = 1;
3957
3958 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3959 if (!vnic->fw_grp_ids) {
3960 rc = -ENOMEM;
3961 goto out;
3962 }
44c6f72a 3963vnic_skip_grps:
ae10ae74
MC
3964 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3965 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3966 continue;
3967
c0c050c5 3968 /* Allocate rss table and hash key */
34370d24
MC
3969 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3970 if (bp->flags & BNXT_FLAG_CHIP_P5)
3971 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3972
3973 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3974 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3975 vnic->rss_table_size,
c0c050c5
MC
3976 &vnic->rss_table_dma_addr,
3977 GFP_KERNEL);
3978 if (!vnic->rss_table) {
3979 rc = -ENOMEM;
3980 goto out;
3981 }
3982
c0c050c5
MC
3983 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3984 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3985 }
3986 return 0;
3987
3988out:
3989 return rc;
3990}
3991
3992static void bnxt_free_hwrm_resources(struct bnxt *bp)
3993{
68f684e2
EP
3994 struct bnxt_hwrm_wait_token *token;
3995
f9ff5782
EP
3996 dma_pool_destroy(bp->hwrm_dma_pool);
3997 bp->hwrm_dma_pool = NULL;
68f684e2
EP
3998
3999 rcu_read_lock();
4000 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4001 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4002 rcu_read_unlock();
c0c050c5
MC
4003}
4004
4005static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4006{
b34695a8 4007 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
f9ff5782
EP
4008 BNXT_HWRM_DMA_SIZE,
4009 BNXT_HWRM_DMA_ALIGN, 0);
4010 if (!bp->hwrm_dma_pool)
e605db80
DK
4011 return -ENOMEM;
4012
68f684e2
EP
4013 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4014
e605db80
DK
4015 return 0;
4016}
4017
177a6cde 4018static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
c0c050c5 4019{
a37120b2
MC
4020 kfree(stats->hw_masks);
4021 stats->hw_masks = NULL;
4022 kfree(stats->sw_stats);
4023 stats->sw_stats = NULL;
177a6cde
MC
4024 if (stats->hw_stats) {
4025 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4026 stats->hw_stats_map);
4027 stats->hw_stats = NULL;
4028 }
4029}
c0c050c5 4030
a37120b2
MC
4031static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4032 bool alloc_masks)
177a6cde
MC
4033{
4034 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4035 &stats->hw_stats_map, GFP_KERNEL);
4036 if (!stats->hw_stats)
4037 return -ENOMEM;
00db3cba 4038
a37120b2
MC
4039 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4040 if (!stats->sw_stats)
4041 goto stats_mem_err;
4042
4043 if (alloc_masks) {
4044 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4045 if (!stats->hw_masks)
4046 goto stats_mem_err;
4047 }
177a6cde 4048 return 0;
a37120b2
MC
4049
4050stats_mem_err:
4051 bnxt_free_stats_mem(bp, stats);
4052 return -ENOMEM;
177a6cde 4053}
00db3cba 4054
d752d053
MC
4055static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4056{
4057 int i;
4058
4059 for (i = 0; i < count; i++)
4060 mask_arr[i] = mask;
4061}
4062
4063static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4064{
4065 int i;
4066
4067 for (i = 0; i < count; i++)
4068 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4069}
4070
4071static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4072 struct bnxt_stats_mem *stats)
4073{
bbf33d1d
EP
4074 struct hwrm_func_qstats_ext_output *resp;
4075 struct hwrm_func_qstats_ext_input *req;
d752d053
MC
4076 __le64 *hw_masks;
4077 int rc;
4078
4079 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4080 !(bp->flags & BNXT_FLAG_CHIP_P5))
4081 return -EOPNOTSUPP;
4082
bbf33d1d 4083 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
d752d053 4084 if (rc)
bbf33d1d 4085 return rc;
d752d053 4086
bbf33d1d
EP
4087 req->fid = cpu_to_le16(0xffff);
4088 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
d752d053 4089
bbf33d1d
EP
4090 resp = hwrm_req_hold(bp, req);
4091 rc = hwrm_req_send(bp, req);
4092 if (!rc) {
4093 hw_masks = &resp->rx_ucast_pkts;
4094 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4095 }
4096 hwrm_req_drop(bp, req);
d752d053
MC
4097 return rc;
4098}
4099
531d1d26
MC
4100static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4101static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4102
d752d053
MC
4103static void bnxt_init_stats(struct bnxt *bp)
4104{
4105 struct bnxt_napi *bnapi = bp->bnapi[0];
4106 struct bnxt_cp_ring_info *cpr;
4107 struct bnxt_stats_mem *stats;
531d1d26
MC
4108 __le64 *rx_stats, *tx_stats;
4109 int rc, rx_count, tx_count;
4110 u64 *rx_masks, *tx_masks;
d752d053 4111 u64 mask;
531d1d26 4112 u8 flags;
d752d053
MC
4113
4114 cpr = &bnapi->cp_ring;
4115 stats = &cpr->stats;
4116 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4117 if (rc) {
4118 if (bp->flags & BNXT_FLAG_CHIP_P5)
4119 mask = (1ULL << 48) - 1;
4120 else
4121 mask = -1ULL;
4122 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4123 }
531d1d26
MC
4124 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4125 stats = &bp->port_stats;
4126 rx_stats = stats->hw_stats;
4127 rx_masks = stats->hw_masks;
4128 rx_count = sizeof(struct rx_port_stats) / 8;
4129 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4130 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4131 tx_count = sizeof(struct tx_port_stats) / 8;
4132
4133 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4134 rc = bnxt_hwrm_port_qstats(bp, flags);
4135 if (rc) {
4136 mask = (1ULL << 40) - 1;
4137
4138 bnxt_fill_masks(rx_masks, mask, rx_count);
4139 bnxt_fill_masks(tx_masks, mask, tx_count);
4140 } else {
4141 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4142 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4143 bnxt_hwrm_port_qstats(bp, 0);
4144 }
4145 }
4146 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4147 stats = &bp->rx_port_stats_ext;
4148 rx_stats = stats->hw_stats;
4149 rx_masks = stats->hw_masks;
4150 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4151 stats = &bp->tx_port_stats_ext;
4152 tx_stats = stats->hw_stats;
4153 tx_masks = stats->hw_masks;
4154 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4155
c07fa08f 4156 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
531d1d26
MC
4157 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4158 if (rc) {
4159 mask = (1ULL << 40) - 1;
4160
4161 bnxt_fill_masks(rx_masks, mask, rx_count);
4162 if (tx_stats)
4163 bnxt_fill_masks(tx_masks, mask, tx_count);
4164 } else {
4165 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4166 if (tx_stats)
4167 bnxt_copy_hw_masks(tx_masks, tx_stats,
4168 tx_count);
4169 bnxt_hwrm_port_qstats_ext(bp, 0);
4170 }
4171 }
d752d053
MC
4172}
4173
177a6cde
MC
4174static void bnxt_free_port_stats(struct bnxt *bp)
4175{
4176 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4177 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
36e53349 4178
177a6cde
MC
4179 bnxt_free_stats_mem(bp, &bp->port_stats);
4180 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4181 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
fd3ab1c7
MC
4182}
4183
4184static void bnxt_free_ring_stats(struct bnxt *bp)
4185{
177a6cde 4186 int i;
3bdf56c4 4187
c0c050c5
MC
4188 if (!bp->bnapi)
4189 return;
4190
c0c050c5
MC
4191 for (i = 0; i < bp->cp_nr_rings; i++) {
4192 struct bnxt_napi *bnapi = bp->bnapi[i];
4193 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4194
177a6cde 4195 bnxt_free_stats_mem(bp, &cpr->stats);
c0c050c5
MC
4196 }
4197}
4198
4199static int bnxt_alloc_stats(struct bnxt *bp)
4200{
4201 u32 size, i;
177a6cde 4202 int rc;
c0c050c5 4203
4e748506 4204 size = bp->hw_ring_stats_size;
c0c050c5
MC
4205
4206 for (i = 0; i < bp->cp_nr_rings; i++) {
4207 struct bnxt_napi *bnapi = bp->bnapi[i];
4208 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4209
177a6cde 4210 cpr->stats.len = size;
a37120b2 4211 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
177a6cde
MC
4212 if (rc)
4213 return rc;
c0c050c5
MC
4214
4215 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4216 }
3bdf56c4 4217
a220eabc
VV
4218 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4219 return 0;
fd3ab1c7 4220
177a6cde 4221 if (bp->port_stats.hw_stats)
a220eabc 4222 goto alloc_ext_stats;
3bdf56c4 4223
177a6cde 4224 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
a37120b2 4225 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
177a6cde
MC
4226 if (rc)
4227 return rc;
3bdf56c4 4228
a220eabc 4229 bp->flags |= BNXT_FLAG_PORT_STATS;
00db3cba 4230
fd3ab1c7 4231alloc_ext_stats:
a220eabc
VV
4232 /* Display extended statistics only if FW supports it */
4233 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
6154532f 4234 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
00db3cba
VV
4235 return 0;
4236
177a6cde 4237 if (bp->rx_port_stats_ext.hw_stats)
a220eabc 4238 goto alloc_tx_ext_stats;
fd3ab1c7 4239
177a6cde 4240 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
a37120b2 4241 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
177a6cde
MC
4242 /* Extended stats are optional */
4243 if (rc)
a220eabc 4244 return 0;
00db3cba 4245
fd3ab1c7 4246alloc_tx_ext_stats:
177a6cde 4247 if (bp->tx_port_stats_ext.hw_stats)
dfe64de9 4248 return 0;
fd3ab1c7 4249
6154532f
VV
4250 if (bp->hwrm_spec_code >= 0x10902 ||
4251 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
177a6cde 4252 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
a37120b2 4253 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
177a6cde
MC
4254 /* Extended stats are optional */
4255 if (rc)
4256 return 0;
3bdf56c4 4257 }
a220eabc 4258 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
c0c050c5
MC
4259 return 0;
4260}
4261
4262static void bnxt_clear_ring_indices(struct bnxt *bp)
4263{
4264 int i;
4265
4266 if (!bp->bnapi)
4267 return;
4268
4269 for (i = 0; i < bp->cp_nr_rings; i++) {
4270 struct bnxt_napi *bnapi = bp->bnapi[i];
4271 struct bnxt_cp_ring_info *cpr;
4272 struct bnxt_rx_ring_info *rxr;
4273 struct bnxt_tx_ring_info *txr;
4274
4275 if (!bnapi)
4276 continue;
4277
4278 cpr = &bnapi->cp_ring;
4279 cpr->cp_raw_cons = 0;
4280
b6ab4b01 4281 txr = bnapi->tx_ring;
3b2b7d9d
MC
4282 if (txr) {
4283 txr->tx_prod = 0;
4284 txr->tx_cons = 0;
4285 }
c0c050c5 4286
b6ab4b01 4287 rxr = bnapi->rx_ring;
3b2b7d9d
MC
4288 if (rxr) {
4289 rxr->rx_prod = 0;
4290 rxr->rx_agg_prod = 0;
4291 rxr->rx_sw_agg_prod = 0;
376a5b86 4292 rxr->rx_next_cons = 0;
3b2b7d9d 4293 }
c0c050c5
MC
4294 }
4295}
4296
4297static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4298{
4299#ifdef CONFIG_RFS_ACCEL
4300 int i;
4301
 4302	/* We are under rtnl_lock and all our NAPIs have been disabled, so it
 4303	 * is safe to delete the hash table.
4304 */
4305 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4306 struct hlist_head *head;
4307 struct hlist_node *tmp;
4308 struct bnxt_ntuple_filter *fltr;
4309
4310 head = &bp->ntp_fltr_hash_tbl[i];
4311 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4312 hlist_del(&fltr->hash);
4313 kfree(fltr);
4314 }
4315 }
4316 if (irq_reinit) {
4317 kfree(bp->ntp_fltr_bmap);
4318 bp->ntp_fltr_bmap = NULL;
4319 }
4320 bp->ntp_fltr_count = 0;
4321#endif
4322}
4323
4324static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4325{
4326#ifdef CONFIG_RFS_ACCEL
4327 int i, rc = 0;
4328
4329 if (!(bp->flags & BNXT_FLAG_RFS))
4330 return 0;
4331
4332 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4333 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4334
4335 bp->ntp_fltr_count = 0;
ac45bd93
DC
4336 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4337 sizeof(long),
c0c050c5
MC
4338 GFP_KERNEL);
4339
4340 if (!bp->ntp_fltr_bmap)
4341 rc = -ENOMEM;
4342
4343 return rc;
4344#else
4345 return 0;
4346#endif
4347}
4348
4349static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4350{
4351 bnxt_free_vnic_attributes(bp);
4352 bnxt_free_tx_rings(bp);
4353 bnxt_free_rx_rings(bp);
4354 bnxt_free_cp_rings(bp);
03c74487 4355 bnxt_free_all_cp_arrays(bp);
c0c050c5
MC
4356 bnxt_free_ntp_fltrs(bp, irq_re_init);
4357 if (irq_re_init) {
fd3ab1c7 4358 bnxt_free_ring_stats(bp);
b0d28207 4359 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
eba93de6 4360 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
fea6b333 4361 bnxt_free_port_stats(bp);
c0c050c5
MC
4362 bnxt_free_ring_grps(bp);
4363 bnxt_free_vnics(bp);
a960dec9
MC
4364 kfree(bp->tx_ring_map);
4365 bp->tx_ring_map = NULL;
b6ab4b01
MC
4366 kfree(bp->tx_ring);
4367 bp->tx_ring = NULL;
4368 kfree(bp->rx_ring);
4369 bp->rx_ring = NULL;
c0c050c5
MC
4370 kfree(bp->bnapi);
4371 bp->bnapi = NULL;
4372 } else {
4373 bnxt_clear_ring_indices(bp);
4374 }
4375}
4376
4377static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4378{
01657bcd 4379 int i, j, rc, size, arr_size;
c0c050c5
MC
4380 void *bnapi;
4381
4382 if (irq_re_init) {
4383 /* Allocate bnapi mem pointer array and mem block for
4384 * all queues
4385 */
4386 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4387 bp->cp_nr_rings);
4388 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4389 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4390 if (!bnapi)
4391 return -ENOMEM;
4392
4393 bp->bnapi = bnapi;
4394 bnapi += arr_size;
4395 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4396 bp->bnapi[i] = bnapi;
4397 bp->bnapi[i]->index = i;
4398 bp->bnapi[i]->bp = bp;
e38287b7
MC
4399 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4400 struct bnxt_cp_ring_info *cpr =
4401 &bp->bnapi[i]->cp_ring;
4402
4403 cpr->cp_ring_struct.ring_mem.flags =
4404 BNXT_RMEM_RING_PTE_FLAG;
4405 }
c0c050c5
MC
4406 }
4407
b6ab4b01
MC
4408 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4409 sizeof(struct bnxt_rx_ring_info),
4410 GFP_KERNEL);
4411 if (!bp->rx_ring)
4412 return -ENOMEM;
4413
4414 for (i = 0; i < bp->rx_nr_rings; i++) {
e38287b7
MC
4415 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4416
4417 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4418 rxr->rx_ring_struct.ring_mem.flags =
4419 BNXT_RMEM_RING_PTE_FLAG;
4420 rxr->rx_agg_ring_struct.ring_mem.flags =
4421 BNXT_RMEM_RING_PTE_FLAG;
4422 }
4423 rxr->bnapi = bp->bnapi[i];
b6ab4b01
MC
4424 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4425 }
4426
4427 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4428 sizeof(struct bnxt_tx_ring_info),
4429 GFP_KERNEL);
4430 if (!bp->tx_ring)
4431 return -ENOMEM;
4432
a960dec9
MC
4433 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4434 GFP_KERNEL);
4435
4436 if (!bp->tx_ring_map)
4437 return -ENOMEM;
4438
01657bcd
MC
4439 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4440 j = 0;
4441 else
4442 j = bp->rx_nr_rings;
4443
4444 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
e38287b7
MC
4445 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4446
4447 if (bp->flags & BNXT_FLAG_CHIP_P5)
4448 txr->tx_ring_struct.ring_mem.flags =
4449 BNXT_RMEM_RING_PTE_FLAG;
4450 txr->bnapi = bp->bnapi[j];
4451 bp->bnapi[j]->tx_ring = txr;
5f449249 4452 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
38413406 4453 if (i >= bp->tx_nr_rings_xdp) {
e38287b7 4454 txr->txq_index = i - bp->tx_nr_rings_xdp;
38413406
MC
4455 bp->bnapi[j]->tx_int = bnxt_tx_int;
4456 } else {
fa3e93e8 4457 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
38413406
MC
4458 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4459 }
b6ab4b01
MC
4460 }
4461
c0c050c5
MC
4462 rc = bnxt_alloc_stats(bp);
4463 if (rc)
4464 goto alloc_mem_err;
d752d053 4465 bnxt_init_stats(bp);
c0c050c5
MC
4466
4467 rc = bnxt_alloc_ntp_fltrs(bp);
4468 if (rc)
4469 goto alloc_mem_err;
4470
4471 rc = bnxt_alloc_vnics(bp);
4472 if (rc)
4473 goto alloc_mem_err;
4474 }
4475
03c74487
MC
4476 rc = bnxt_alloc_all_cp_arrays(bp);
4477 if (rc)
4478 goto alloc_mem_err;
4479
c0c050c5
MC
4480 bnxt_init_ring_struct(bp);
4481
4482 rc = bnxt_alloc_rx_rings(bp);
4483 if (rc)
4484 goto alloc_mem_err;
4485
4486 rc = bnxt_alloc_tx_rings(bp);
4487 if (rc)
4488 goto alloc_mem_err;
4489
4490 rc = bnxt_alloc_cp_rings(bp);
4491 if (rc)
4492 goto alloc_mem_err;
4493
4494 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4495 BNXT_VNIC_UCAST_FLAG;
4496 rc = bnxt_alloc_vnic_attributes(bp);
4497 if (rc)
4498 goto alloc_mem_err;
4499 return 0;
4500
4501alloc_mem_err:
4502 bnxt_free_mem(bp, true);
4503 return rc;
4504}
4505
9d8bc097
MC
4506static void bnxt_disable_int(struct bnxt *bp)
4507{
4508 int i;
4509
4510 if (!bp->bnapi)
4511 return;
4512
4513 for (i = 0; i < bp->cp_nr_rings; i++) {
4514 struct bnxt_napi *bnapi = bp->bnapi[i];
4515 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
daf1f1e7 4516 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9d8bc097 4517
daf1f1e7 4518 if (ring->fw_ring_id != INVALID_HW_RING_ID)
697197e5 4519 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4520 }
4521}
4522
e5811b8c
MC
4523static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4524{
4525 struct bnxt_napi *bnapi = bp->bnapi[n];
4526 struct bnxt_cp_ring_info *cpr;
4527
4528 cpr = &bnapi->cp_ring;
4529 return cpr->cp_ring_struct.map_idx;
4530}
4531
9d8bc097
MC
4532static void bnxt_disable_int_sync(struct bnxt *bp)
4533{
4534 int i;
4535
38290e37
MC
4536 if (!bp->irq_tbl)
4537 return;
4538
9d8bc097
MC
4539 atomic_inc(&bp->intr_sem);
4540
4541 bnxt_disable_int(bp);
e5811b8c
MC
4542 for (i = 0; i < bp->cp_nr_rings; i++) {
4543 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4544
4545 synchronize_irq(bp->irq_tbl[map_idx].vector);
4546 }
9d8bc097
MC
4547}
4548
4549static void bnxt_enable_int(struct bnxt *bp)
4550{
4551 int i;
4552
4553 atomic_set(&bp->intr_sem, 0);
4554 for (i = 0; i < bp->cp_nr_rings; i++) {
4555 struct bnxt_napi *bnapi = bp->bnapi[i];
4556 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4557
697197e5 4558 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4559 }
4560}
4561
2e882468
VV
4562int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4563 bool async_only)
c0c050c5 4564{
25be8623
MC
4565 DECLARE_BITMAP(async_events_bmap, 256);
4566 u32 *events = (u32 *)async_events_bmap;
bbf33d1d
EP
4567 struct hwrm_func_drv_rgtr_output *resp;
4568 struct hwrm_func_drv_rgtr_input *req;
acfb50e4 4569 u32 flags;
2e882468 4570 int rc, i;
a1653b13 4571
bbf33d1d
EP
4572 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4573 if (rc)
4574 return rc;
a1653b13 4575
bbf33d1d
EP
4576 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4577 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4578 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
a1653b13 4579
bbf33d1d 4580 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
8280b38e
VV
4581 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4582 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4583 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
acfb50e4 4584 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
e633a329
VV
4585 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4586 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
bbf33d1d
EP
4587 req->flags = cpu_to_le32(flags);
4588 req->ver_maj_8b = DRV_VER_MAJ;
4589 req->ver_min_8b = DRV_VER_MIN;
4590 req->ver_upd_8b = DRV_VER_UPD;
4591 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4592 req->ver_min = cpu_to_le16(DRV_VER_MIN);
4593 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
c0c050c5
MC
4594
4595 if (BNXT_PF(bp)) {
9b0436c3 4596 u32 data[8];
a1653b13 4597 int i;
c0c050c5 4598
9b0436c3
MC
4599 memset(data, 0, sizeof(data));
4600 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4601 u16 cmd = bnxt_vf_req_snif[i];
4602 unsigned int bit, idx;
4603
4604 idx = cmd / 32;
4605 bit = cmd % 32;
4606 data[idx] |= 1 << bit;
4607 }
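/* Editorial note: the loop above packs each forwarded command id into a
 * 256-bit bitmap stored as eight 32-bit words; for example, an id of 44
 * (illustrative value) lands in data[44 / 32] = data[1], bit 44 % 32 = 12.
 */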
c0c050c5 4608
de68f5de 4609 for (i = 0; i < 8; i++)
bbf33d1d 4610 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
de68f5de 4611
bbf33d1d 4612 req->enables |=
c0c050c5
MC
4613 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4614 }
4615
abd43a13 4616 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
bbf33d1d 4617 req->flags |= cpu_to_le32(
abd43a13
VD
4618 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4619
2e882468
VV
4620 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4621 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4622 u16 event_id = bnxt_async_events_arr[i];
4623
4624 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4625 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4626 continue;
4627 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4628 }
4629 if (bmap && bmap_size) {
4630 for (i = 0; i < bmap_size; i++) {
4631 if (test_bit(i, bmap))
4632 __set_bit(i, async_events_bmap);
4633 }
4634 }
4635 for (i = 0; i < 8; i++)
bbf33d1d 4636 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
2e882468
VV
4637
4638 if (async_only)
bbf33d1d 4639 req->enables =
2e882468
VV
4640 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4641
bbf33d1d
EP
4642 resp = hwrm_req_hold(bp, req);
4643 rc = hwrm_req_send(bp, req);
bdb38602
VV
4644 if (!rc) {
4645 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4646 if (resp->flags &
4647 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4648 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4649 }
bbf33d1d 4650 hwrm_req_drop(bp, req);
25e1acd6 4651 return rc;
c0c050c5
MC
4652}
4653
be58a0da
JH
4654static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4655{
bbf33d1d
EP
4656 struct hwrm_func_drv_unrgtr_input *req;
4657 int rc;
be58a0da 4658
bdb38602
VV
4659 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4660 return 0;
4661
bbf33d1d
EP
4662 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4663 if (rc)
4664 return rc;
4665 return hwrm_req_send(bp, req);
be58a0da
JH
4666}
4667
c0c050c5
MC
4668static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4669{
bbf33d1d
EP
4670 struct hwrm_tunnel_dst_port_free_input *req;
4671 int rc;
c0c050c5 4672
7ae9dc35
MC
4673 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
4674 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4675 return 0;
4676 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
4677 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4678 return 0;
4679
bbf33d1d
EP
4680 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4681 if (rc)
4682 return rc;
4683
4684 req->tunnel_type = tunnel_type;
c0c050c5
MC
4685
4686 switch (tunnel_type) {
4687 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
bbf33d1d 4688 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
7ae9dc35 4689 bp->vxlan_port = 0;
442a35a5 4690 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
c0c050c5
MC
4691 break;
4692 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
bbf33d1d 4693 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
7ae9dc35 4694 bp->nge_port = 0;
442a35a5 4695 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
c0c050c5
MC
4696 break;
4697 default:
4698 break;
4699 }
4700
bbf33d1d 4701 rc = hwrm_req_send(bp, req);
c0c050c5
MC
4702 if (rc)
4703 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4704 rc);
4705 return rc;
4706}
4707
4708static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4709 u8 tunnel_type)
4710{
bbf33d1d
EP
4711 struct hwrm_tunnel_dst_port_alloc_output *resp;
4712 struct hwrm_tunnel_dst_port_alloc_input *req;
4713 int rc;
c0c050c5 4714
bbf33d1d
EP
4715 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4716 if (rc)
4717 return rc;
c0c050c5 4718
bbf33d1d
EP
4719 req->tunnel_type = tunnel_type;
4720 req->tunnel_dst_port_val = port;
c0c050c5 4721
bbf33d1d
EP
4722 resp = hwrm_req_hold(bp, req);
4723 rc = hwrm_req_send(bp, req);
c0c050c5
MC
4724 if (rc) {
4725 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4726 rc);
4727 goto err_out;
4728 }
4729
57aac71b
CJ
4730 switch (tunnel_type) {
4731 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
7ae9dc35 4732 bp->vxlan_port = port;
442a35a5
JK
4733 bp->vxlan_fw_dst_port_id =
4734 le16_to_cpu(resp->tunnel_dst_port_id);
57aac71b
CJ
4735 break;
4736 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
7ae9dc35 4737 bp->nge_port = port;
442a35a5 4738 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
57aac71b
CJ
4739 break;
4740 default:
4741 break;
4742 }
4743
c0c050c5 4744err_out:
bbf33d1d 4745 hwrm_req_drop(bp, req);
c0c050c5
MC
4746 return rc;
4747}
4748
4749static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4750{
bbf33d1d 4751 struct hwrm_cfa_l2_set_rx_mask_input *req;
c0c050c5 4752 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d 4753 int rc;
c0c050c5 4754
bbf33d1d
EP
4755 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4756 if (rc)
4757 return rc;
c0c050c5 4758
bbf33d1d
EP
4759 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4760 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4761 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4762 req->mask = cpu_to_le32(vnic->rx_mask);
4763 return hwrm_req_send_silent(bp, req);
c0c050c5
MC
4764}
4765
4766#ifdef CONFIG_RFS_ACCEL
4767static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4768 struct bnxt_ntuple_filter *fltr)
4769{
bbf33d1d
EP
4770 struct hwrm_cfa_ntuple_filter_free_input *req;
4771 int rc;
c0c050c5 4772
bbf33d1d
EP
4773 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4774 if (rc)
4775 return rc;
4776
4777 req->ntuple_filter_id = fltr->filter_id;
4778 return hwrm_req_send(bp, req);
c0c050c5
MC
4779}
4780
4781#define BNXT_NTP_FLTR_FLAGS \
4782 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4783 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4784 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4785 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4786 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4787 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4788 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4789 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4790 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4791 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4792 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4793 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4794 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
c193554e 4795 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
c0c050c5 4796
61aad724
MC
4797#define BNXT_NTP_TUNNEL_FLTR_FLAG \
4798 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4799
c0c050c5
MC
4800static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4801 struct bnxt_ntuple_filter *fltr)
4802{
5c209fc8 4803 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
bbf33d1d 4804 struct hwrm_cfa_ntuple_filter_alloc_input *req;
c0c050c5 4805 struct flow_keys *keys = &fltr->fkeys;
ac33906c 4806 struct bnxt_vnic_info *vnic;
41136ab3 4807 u32 flags = 0;
bbf33d1d 4808 int rc;
c0c050c5 4809
bbf33d1d
EP
4810 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4811 if (rc)
4812 return rc;
4813
4814 req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
c0c050c5 4815
41136ab3
MC
4816 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4817 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
bbf33d1d 4818 req->dst_id = cpu_to_le16(fltr->rxq);
ac33906c
MC
4819 } else {
4820 vnic = &bp->vnic_info[fltr->rxq + 1];
bbf33d1d 4821 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
ac33906c 4822 }
bbf33d1d
EP
4823 req->flags = cpu_to_le32(flags);
4824 req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
c0c050c5 4825
bbf33d1d
EP
4826 req->ethertype = htons(ETH_P_IP);
4827 memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4828 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4829 req->ip_protocol = keys->basic.ip_proto;
c0c050c5 4830
dda0e746
MC
4831 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4832 int i;
4833
bbf33d1d
EP
4834 req->ethertype = htons(ETH_P_IPV6);
4835 req->ip_addr_type =
dda0e746 4836 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
bbf33d1d 4837 *(struct in6_addr *)&req->src_ipaddr[0] =
dda0e746 4838 keys->addrs.v6addrs.src;
bbf33d1d 4839 *(struct in6_addr *)&req->dst_ipaddr[0] =
dda0e746
MC
4840 keys->addrs.v6addrs.dst;
4841 for (i = 0; i < 4; i++) {
bbf33d1d
EP
4842 req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4843 req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
dda0e746
MC
4844 }
4845 } else {
bbf33d1d
EP
4846 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
4847 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4848 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4849 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
dda0e746 4850 }
61aad724 4851 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
bbf33d1d
EP
4852 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4853 req->tunnel_type =
61aad724
MC
4854 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4855 }
c0c050c5 4856
bbf33d1d
EP
4857 req->src_port = keys->ports.src;
4858 req->src_port_mask = cpu_to_be16(0xffff);
4859 req->dst_port = keys->ports.dst;
4860 req->dst_port_mask = cpu_to_be16(0xffff);
c0c050c5 4861
bbf33d1d
EP
4862 resp = hwrm_req_hold(bp, req);
4863 rc = hwrm_req_send(bp, req);
4864 if (!rc)
c0c050c5 4865 fltr->filter_id = resp->ntuple_filter_id;
bbf33d1d 4866 hwrm_req_drop(bp, req);
c0c050c5
MC
4867 return rc;
4868}
4869#endif
4870
4871static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4872 u8 *mac_addr)
4873{
bbf33d1d
EP
4874 struct hwrm_cfa_l2_filter_alloc_output *resp;
4875 struct hwrm_cfa_l2_filter_alloc_input *req;
4876 int rc;
c0c050c5 4877
bbf33d1d
EP
4878 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
4879 if (rc)
4880 return rc;
4881
4882 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
dc52c6c7 4883 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
bbf33d1d 4884 req->flags |=
dc52c6c7 4885 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
bbf33d1d
EP
4886 req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4887 req->enables =
c0c050c5 4888 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
c193554e 4889 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
c0c050c5 4890 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
bbf33d1d
EP
4891 memcpy(req->l2_addr, mac_addr, ETH_ALEN);
4892 req->l2_addr_mask[0] = 0xff;
4893 req->l2_addr_mask[1] = 0xff;
4894 req->l2_addr_mask[2] = 0xff;
4895 req->l2_addr_mask[3] = 0xff;
4896 req->l2_addr_mask[4] = 0xff;
4897 req->l2_addr_mask[5] = 0xff;
4898
4899 resp = hwrm_req_hold(bp, req);
4900 rc = hwrm_req_send(bp, req);
c0c050c5
MC
4901 if (!rc)
4902 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4903 resp->l2_filter_id;
bbf33d1d 4904 hwrm_req_drop(bp, req);
c0c050c5
MC
4905 return rc;
4906}
4907
4908static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4909{
bbf33d1d 4910 struct hwrm_cfa_l2_filter_free_input *req;
c0c050c5 4911 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
bbf33d1d 4912 int rc;
c0c050c5
MC
4913
4914 /* Any associated ntuple filters will also be cleared by firmware. */
bbf33d1d
EP
4915 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
4916 if (rc)
4917 return rc;
4918 hwrm_req_hold(bp, req);
c0c050c5
MC
4919 for (i = 0; i < num_of_vnics; i++) {
4920 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4921
4922 for (j = 0; j < vnic->uc_filter_count; j++) {
bbf33d1d 4923 req->l2_filter_id = vnic->fw_l2_filter_id[j];
c0c050c5 4924
bbf33d1d 4925 rc = hwrm_req_send(bp, req);
c0c050c5
MC
4926 }
4927 vnic->uc_filter_count = 0;
4928 }
bbf33d1d 4929 hwrm_req_drop(bp, req);
c0c050c5
MC
4930 return rc;
4931}
4932
4933static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4934{
4935 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
79632e9b 4936 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
bbf33d1d
EP
4937 struct hwrm_vnic_tpa_cfg_input *req;
4938 int rc;
c0c050c5 4939
3c4fe80b
MC
4940 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4941 return 0;
4942
bbf33d1d
EP
4943 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
4944 if (rc)
4945 return rc;
c0c050c5
MC
4946
4947 if (tpa_flags) {
4948 u16 mss = bp->dev->mtu - 40;
4949 u32 nsegs, n, segs = 0, flags;
4950
4951 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4952 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4953 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4954 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4955 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4956 if (tpa_flags & BNXT_FLAG_GRO)
4957 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4958
bbf33d1d 4959 req->flags = cpu_to_le32(flags);
c0c050c5 4960
bbf33d1d 4961 req->enables =
c0c050c5 4962 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
c193554e
MC
4963 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4964 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
c0c050c5
MC
4965
 4966		/* Number of segs is in log2 units, and the first packet is
 4967		 * not included in these units.
4968 */
2839f28b
MC
4969 if (mss <= BNXT_RX_PAGE_SIZE) {
4970 n = BNXT_RX_PAGE_SIZE / mss;
c0c050c5
MC
4971 nsegs = (MAX_SKB_FRAGS - 1) * n;
4972 } else {
2839f28b
MC
4973 n = mss / BNXT_RX_PAGE_SIZE;
4974 if (mss & (BNXT_RX_PAGE_SIZE - 1))
c0c050c5
MC
4975 n++;
4976 nsegs = (MAX_SKB_FRAGS - n) / n;
4977 }
4978
79632e9b
MC
4979 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4980 segs = MAX_TPA_SEGS_P5;
4981 max_aggs = bp->max_tpa;
4982 } else {
4983 segs = ilog2(nsegs);
4984 }
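/* Editorial note with illustrative numbers only: for an MTU of 1500 the
 * MSS above is 1460; assuming BNXT_RX_PAGE_SIZE is 4096 and MAX_SKB_FRAGS
 * is 17, n = 4096 / 1460 = 2, nsegs = (17 - 1) * 2 = 32 and, on pre-P5
 * chips, segs = ilog2(32) = 5.
 */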
bbf33d1d
EP
4985 req->max_agg_segs = cpu_to_le16(segs);
4986 req->max_aggs = cpu_to_le16(max_aggs);
c193554e 4987
bbf33d1d 4988 req->min_agg_len = cpu_to_le32(512);
c0c050c5 4989 }
bbf33d1d 4990 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
c0c050c5 4991
bbf33d1d 4992 return hwrm_req_send(bp, req);
c0c050c5
MC
4993}
4994
2c61d211
MC
4995static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4996{
4997 struct bnxt_ring_grp_info *grp_info;
4998
4999 grp_info = &bp->grp_info[ring->grp_idx];
5000 return grp_info->cp_fw_ring_id;
5001}
5002
5003static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5004{
5005 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5006 struct bnxt_napi *bnapi = rxr->bnapi;
5007 struct bnxt_cp_ring_info *cpr;
5008
5009 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5010 return cpr->cp_ring_struct.fw_ring_id;
5011 } else {
5012 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5013 }
5014}
5015
5016static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5017{
5018 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5019 struct bnxt_napi *bnapi = txr->bnapi;
5020 struct bnxt_cp_ring_info *cpr;
5021
5022 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5023 return cpr->cp_ring_struct.fw_ring_id;
5024 } else {
5025 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5026 }
5027}
5028
1667cbf6
MC
5029static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5030{
5031 int entries;
5032
5033 if (bp->flags & BNXT_FLAG_CHIP_P5)
5034 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5035 else
5036 entries = HW_HASH_INDEX_SIZE;
5037
5038 bp->rss_indir_tbl_entries = entries;
5039 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5040 GFP_KERNEL);
5041 if (!bp->rss_indir_tbl)
5042 return -ENOMEM;
5043 return 0;
5044}
5045
5046static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5047{
5048 u16 max_rings, max_entries, pad, i;
5049
5050 if (!bp->rx_nr_rings)
5051 return;
5052
5053 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5054 max_rings = bp->rx_nr_rings - 1;
5055 else
5056 max_rings = bp->rx_nr_rings;
5057
5058 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5059
5060 for (i = 0; i < max_entries; i++)
5061 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5062
5063 pad = bp->rss_indir_tbl_entries - max_entries;
5064 if (pad)
5065 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5066}
5067
bd3191b5
MC
5068static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5069{
5070 u16 i, tbl_size, max_ring = 0;
5071
5072 if (!bp->rss_indir_tbl)
5073 return 0;
5074
5075 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5076 for (i = 0; i < tbl_size; i++)
5077 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5078 return max_ring;
5079}
5080
f9f6a3fb
MC
5081int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5082{
5083 if (bp->flags & BNXT_FLAG_CHIP_P5)
5084 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5085 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5086 return 2;
5087 return 1;
5088}
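/* Editorial note: on P5 chips each RSS context covers
 * BNXT_RSS_TABLE_ENTRIES_P5 rings (assumed to be 64 here), so for example
 * 96 RX rings need DIV_ROUND_UP(96, 64) = 2 contexts, while older chips
 * use a single context (two on Nitro A0).
 */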
5089
f33a305d
MC
5090static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5091{
5092 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5093 u16 i, j;
5094
5095 /* Fill the RSS indirection table with ring group ids */
5096 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5097 if (!no_rss)
5098 j = bp->rss_indir_tbl[i];
5099 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5100 }
5101}
5102
5103static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5104 struct bnxt_vnic_info *vnic)
5105{
5106 __le16 *ring_tbl = vnic->rss_table;
5107 struct bnxt_rx_ring_info *rxr;
5108 u16 tbl_size, i;
5109
5110 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5111
5112 for (i = 0; i < tbl_size; i++) {
5113 u16 ring_id, j;
5114
5115 j = bp->rss_indir_tbl[i];
5116 rxr = &bp->rx_ring[j];
5117
5118 ring_id = rxr->rx_ring_struct.fw_ring_id;
5119 *ring_tbl++ = cpu_to_le16(ring_id);
5120 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5121 *ring_tbl++ = cpu_to_le16(ring_id);
5122 }
5123}
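/* Editorial note: on P5 chips every indirection-table slot filled above
 * emits a pair of 16-bit entries, the RX ring's firmware id followed by the
 * id of its companion completion ring, so the DMA table handed to firmware
 * is twice as long as the logical rss_indir_tbl it is built from.
 */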
5124
5125static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5126{
5127 if (bp->flags & BNXT_FLAG_CHIP_P5)
5128 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5129 else
5130 __bnxt_fill_hw_rss_tbl(bp, vnic);
5131}
5132
c0c050c5
MC
5133static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5134{
c0c050c5 5135 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5136 struct hwrm_vnic_rss_cfg_input *req;
5137 int rc;
c0c050c5 5138
7b3af4f7
MC
5139 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5140 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
c0c050c5
MC
5141 return 0;
5142
bbf33d1d
EP
5143 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5144 if (rc)
5145 return rc;
5146
c0c050c5 5147 if (set_rss) {
f33a305d 5148 bnxt_fill_hw_rss_tbl(bp, vnic);
bbf33d1d
EP
5149 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5150 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5151 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5152 req->hash_key_tbl_addr =
c0c050c5
MC
5153 cpu_to_le64(vnic->rss_hash_key_dma_addr);
5154 }
bbf33d1d
EP
5155 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5156 return hwrm_req_send(bp, req);
c0c050c5
MC
5157}
5158
7b3af4f7
MC
5159static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5160{
5161 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d 5162 struct hwrm_vnic_rss_cfg_input *req;
f33a305d
MC
5163 dma_addr_t ring_tbl_map;
5164 u32 i, nr_ctxs;
bbf33d1d
EP
5165 int rc;
5166
5167 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5168 if (rc)
5169 return rc;
5170
5171 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5172 if (!set_rss)
5173 return hwrm_req_send(bp, req);
7b3af4f7 5174
f33a305d 5175 bnxt_fill_hw_rss_tbl(bp, vnic);
bbf33d1d
EP
5176 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5177 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5178 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
f33a305d 5179 ring_tbl_map = vnic->rss_table_dma_addr;
f9f6a3fb 5180 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
7b3af4f7 5181
bbf33d1d
EP
5182 hwrm_req_hold(bp, req);
5183 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5184 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5185 req->ring_table_pair_index = i;
5186 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5187 rc = hwrm_req_send(bp, req);
7b3af4f7 5188 if (rc)
bbf33d1d 5189 goto exit;
7b3af4f7 5190 }
bbf33d1d
EP
5191
5192exit:
5193 hwrm_req_drop(bp, req);
5194 return rc;
7b3af4f7
MC
5195}
5196
c0c050c5
MC
5197static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5198{
5199 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5200 struct hwrm_vnic_plcmodes_cfg_input *req;
5201 int rc;
5202
5203 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5204 if (rc)
5205 return rc;
c0c050c5 5206
bbf33d1d
EP
5207 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5208 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5209 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5210 req->enables =
c0c050c5
MC
5211 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5212 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5213 /* thresholds not implemented in firmware yet */
bbf33d1d
EP
5214 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5215 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5216 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5217 return hwrm_req_send(bp, req);
c0c050c5
MC
5218}
5219
94ce9caa
PS
5220static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5221 u16 ctx_idx)
c0c050c5 5222{
bbf33d1d 5223 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
c0c050c5 5224
bbf33d1d
EP
5225 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5226 return;
5227
5228 req->rss_cos_lb_ctx_id =
94ce9caa 5229 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
c0c050c5 5230
bbf33d1d 5231 hwrm_req_send(bp, req);
94ce9caa 5232 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
c0c050c5
MC
5233}
5234
5235static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5236{
94ce9caa 5237 int i, j;
c0c050c5
MC
5238
5239 for (i = 0; i < bp->nr_vnics; i++) {
5240 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5241
94ce9caa
PS
5242 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5243 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5244 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5245 }
c0c050c5
MC
5246 }
5247 bp->rsscos_nr_ctxs = 0;
5248}
5249
94ce9caa 5250static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
c0c050c5 5251{
bbf33d1d
EP
5252 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5253 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
c0c050c5 5254 int rc;
c0c050c5 5255
bbf33d1d
EP
5256 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5257 if (rc)
5258 return rc;
c0c050c5 5259
bbf33d1d
EP
5260 resp = hwrm_req_hold(bp, req);
5261 rc = hwrm_req_send(bp, req);
c0c050c5 5262 if (!rc)
94ce9caa 5263 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
c0c050c5 5264 le16_to_cpu(resp->rss_cos_lb_ctx_id);
bbf33d1d 5265 hwrm_req_drop(bp, req);
c0c050c5
MC
5266
5267 return rc;
5268}
5269
abe93ad2
MC
5270static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5271{
5272 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5273 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5274 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5275}
5276
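/* Program a VNIC: on P5 chips the default RX and completion rings are
 * assigned directly; on earlier chips the default ring group and the
 * RSS/COS rules are set instead.  The MRU is derived from the MTU, and
 * VLAN stripping / RoCE VNIC mode flags are applied as needed.
 */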
a588e458 5277int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
c0c050c5 5278{
c0c050c5 5279 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5280 struct hwrm_vnic_cfg_input *req;
5281 unsigned int ring = 0, grp_idx;
cf6645f8 5282 u16 def_vlan = 0;
bbf33d1d 5283 int rc;
c0c050c5 5284
bbf33d1d
EP
5285 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5286 if (rc)
5287 return rc;
dc52c6c7 5288
7b3af4f7
MC
5289 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5290 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5291
bbf33d1d 5292 req->default_rx_ring_id =
7b3af4f7 5293 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
bbf33d1d 5294 req->default_cmpl_ring_id =
7b3af4f7 5295 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
bbf33d1d 5296 req->enables =
7b3af4f7
MC
5297 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5298 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5299 goto vnic_mru;
5300 }
bbf33d1d 5301 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
c0c050c5 5302	/* Only RSS is supported for now; TBD: COS & LB */
dc52c6c7 5303 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
bbf33d1d
EP
5304 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5305 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
dc52c6c7 5306 VNIC_CFG_REQ_ENABLES_MRU);
ae10ae74 5307 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
bbf33d1d 5308 req->rss_rule =
ae10ae74 5309 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
bbf33d1d 5310 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
ae10ae74 5311 VNIC_CFG_REQ_ENABLES_MRU);
bbf33d1d 5312 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
dc52c6c7 5313 } else {
bbf33d1d 5314 req->rss_rule = cpu_to_le16(0xffff);
dc52c6c7 5315 }
94ce9caa 5316
dc52c6c7
PS
5317 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5318 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
bbf33d1d
EP
5319 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5320 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
94ce9caa 5321 } else {
bbf33d1d 5322 req->cos_rule = cpu_to_le16(0xffff);
94ce9caa
PS
5323 }
5324
c0c050c5 5325 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
b81a90d3 5326 ring = 0;
c0c050c5 5327 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
b81a90d3 5328 ring = vnic_id - 1;
76595193
PS
5329 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5330 ring = bp->rx_nr_rings - 1;
c0c050c5 5331
b81a90d3 5332 grp_idx = bp->rx_ring[ring].bnapi->index;
bbf33d1d
EP
5333 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5334 req->lb_rule = cpu_to_le16(0xffff);
7b3af4f7 5335vnic_mru:
bbf33d1d 5336 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
c0c050c5 5337
bbf33d1d 5338 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
cf6645f8
MC
5339#ifdef CONFIG_BNXT_SRIOV
5340 if (BNXT_VF(bp))
5341 def_vlan = bp->vf.vlan;
5342#endif
5343 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
bbf33d1d 5344 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
a588e458 5345 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
bbf33d1d 5346 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
c0c050c5 5347
bbf33d1d 5348 return hwrm_req_send(bp, req);
c0c050c5
MC
5349}
5350
3d061591 5351static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
c0c050c5 5352{
c0c050c5 5353 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
bbf33d1d 5354 struct hwrm_vnic_free_input *req;
c0c050c5 5355
bbf33d1d
EP
5356 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5357 return;
5358
5359 req->vnic_id =
c0c050c5
MC
5360 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5361
bbf33d1d 5362 hwrm_req_send(bp, req);
c0c050c5
MC
5363 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5364 }
c0c050c5
MC
5365}
5366
5367static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5368{
5369 u16 i;
5370
5371 for (i = 0; i < bp->nr_vnics; i++)
5372 bnxt_hwrm_vnic_free_one(bp, i);
5373}
5374
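/* Allocate a VNIC in firmware.  On non-P5 chips the ring groups
 * starting at start_rx_ring_idx are mapped to the VNIC first; all RSS
 * contexts are reset to INVALID_HW_RING_ID.  VNIC 0 is flagged as the
 * default VNIC.
 */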
b81a90d3
MC
5375static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5376 unsigned int start_rx_ring_idx,
5377 unsigned int nr_rings)
c0c050c5 5378{
b81a90d3 5379 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
44c6f72a 5380 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5381 struct hwrm_vnic_alloc_output *resp;
5382 struct hwrm_vnic_alloc_input *req;
5383 int rc;
5384
5385 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5386 if (rc)
5387 return rc;
44c6f72a
MC
5388
5389 if (bp->flags & BNXT_FLAG_CHIP_P5)
5390 goto vnic_no_ring_grps;
c0c050c5
MC
5391
5392 /* map ring groups to this vnic */
b81a90d3
MC
5393 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5394 grp_idx = bp->rx_ring[i].bnapi->index;
5395 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
c0c050c5 5396 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
b81a90d3 5397 j, nr_rings);
c0c050c5
MC
5398 break;
5399 }
44c6f72a 5400 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
c0c050c5
MC
5401 }
5402
44c6f72a
MC
5403vnic_no_ring_grps:
5404 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5405 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
c0c050c5 5406 if (vnic_id == 0)
bbf33d1d 5407 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
c0c050c5 5408
bbf33d1d
EP
5409 resp = hwrm_req_hold(bp, req);
5410 rc = hwrm_req_send(bp, req);
c0c050c5 5411 if (!rc)
44c6f72a 5412 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
bbf33d1d 5413 hwrm_req_drop(bp, req);
c0c050c5
MC
5414 return rc;
5415}
5416
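/* Query VNIC capabilities: new RSS capability (non-P5 only), RoCE
 * mirroring, VLAN strip support and the max TPA v2 aggregations, which
 * also selects the per-ring hardware stats structure size.
 */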
8fdefd63
MC
5417static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5418{
bbf33d1d
EP
5419 struct hwrm_vnic_qcaps_output *resp;
5420 struct hwrm_vnic_qcaps_input *req;
8fdefd63
MC
5421 int rc;
5422
fbbdbc64 5423 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
ba642ab7 5424 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
8fdefd63
MC
5425 if (bp->hwrm_spec_code < 0x10600)
5426 return 0;
5427
bbf33d1d
EP
5428 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5429 if (rc)
5430 return rc;
5431
5432 resp = hwrm_req_hold(bp, req);
5433 rc = hwrm_req_send(bp, req);
8fdefd63 5434 if (!rc) {
abe93ad2
MC
5435 u32 flags = le32_to_cpu(resp->flags);
5436
41e8d798
MC
5437 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5438 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
8fdefd63 5439 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
abe93ad2
MC
5440 if (flags &
5441 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5442 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
1da63ddd
EP
5443
5444 /* Older P5 fw before EXT_HW_STATS support did not set
5445 * VLAN_STRIP_CAP properly.
5446 */
5447 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
9d6b648c 5448 (BNXT_CHIP_P5_THOR(bp) &&
1da63ddd
EP
5449 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5450 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
79632e9b 5451 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
9d6b648c
MC
5452 if (bp->max_tpa_v2) {
5453 if (BNXT_CHIP_P5_THOR(bp))
5454 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5455 else
5456 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5457 }
8fdefd63 5458 }
bbf33d1d 5459 hwrm_req_drop(bp, req);
8fdefd63
MC
5460 return rc;
5461}
5462
c0c050c5
MC
5463static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5464{
bbf33d1d
EP
5465 struct hwrm_ring_grp_alloc_output *resp;
5466 struct hwrm_ring_grp_alloc_input *req;
5467 int rc;
c0c050c5 5468 u16 i;
c0c050c5 5469
44c6f72a
MC
5470 if (bp->flags & BNXT_FLAG_CHIP_P5)
5471 return 0;
5472
bbf33d1d
EP
5473 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5474 if (rc)
5475 return rc;
5476
5477 resp = hwrm_req_hold(bp, req);
c0c050c5 5478 for (i = 0; i < bp->rx_nr_rings; i++) {
b81a90d3 5479 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
c0c050c5 5480
bbf33d1d
EP
5481 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5482 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5483 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5484 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
c0c050c5 5485
bbf33d1d 5486 rc = hwrm_req_send(bp, req);
c0c050c5 5487
c0c050c5
MC
5488 if (rc)
5489 break;
5490
b81a90d3
MC
5491 bp->grp_info[grp_idx].fw_grp_id =
5492 le32_to_cpu(resp->ring_group_id);
c0c050c5 5493 }
bbf33d1d 5494 hwrm_req_drop(bp, req);
c0c050c5
MC
5495 return rc;
5496}
5497
3d061591 5498static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
c0c050c5 5499{
bbf33d1d 5500 struct hwrm_ring_grp_free_input *req;
c0c050c5 5501 u16 i;
c0c050c5 5502
44c6f72a 5503 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
3d061591 5504 return;
c0c050c5 5505
bbf33d1d
EP
5506 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5507 return;
c0c050c5 5508
bbf33d1d 5509 hwrm_req_hold(bp, req);
c0c050c5
MC
5510 for (i = 0; i < bp->cp_nr_rings; i++) {
5511 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5512 continue;
bbf33d1d 5513 req->ring_group_id =
c0c050c5
MC
5514 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5515
bbf33d1d 5516 hwrm_req_send(bp, req);
c0c050c5
MC
5517 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5518 }
bbf33d1d 5519 hwrm_req_drop(bp, req);
c0c050c5
MC
5520}
5521
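/* Build and send a HWRM_RING_ALLOC request for one ring, filling in the
 * page table (or single page) address, ring length and the associated
 * completion ring / stats context for the given ring type.  On success
 * the firmware ring id is stored in ring->fw_ring_id.
 */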
5522static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5523 struct bnxt_ring_struct *ring,
9899bb59 5524 u32 ring_type, u32 map_index)
c0c050c5 5525{
bbf33d1d
EP
5526 struct hwrm_ring_alloc_output *resp;
5527 struct hwrm_ring_alloc_input *req;
6fe19886 5528 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
9899bb59 5529 struct bnxt_ring_grp_info *grp_info;
bbf33d1d 5530 int rc, err = 0;
c0c050c5
MC
5531 u16 ring_id;
5532
bbf33d1d
EP
5533 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5534 if (rc)
5535 goto exit;
c0c050c5 5536
bbf33d1d 5537 req->enables = 0;
6fe19886 5538 if (rmem->nr_pages > 1) {
bbf33d1d 5539 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
c0c050c5 5540 /* Page size is in log2 units */
bbf33d1d
EP
5541 req->page_size = BNXT_PAGE_SHIFT;
5542 req->page_tbl_depth = 1;
c0c050c5 5543 } else {
bbf33d1d 5544 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
c0c050c5 5545 }
bbf33d1d 5546 req->fbo = 0;
c0c050c5 5547 /* Association of ring index with doorbell index and MSIX number */
bbf33d1d 5548 req->logical_id = cpu_to_le16(map_index);
c0c050c5
MC
5549
5550 switch (ring_type) {
2c61d211
MC
5551 case HWRM_RING_ALLOC_TX: {
5552 struct bnxt_tx_ring_info *txr;
5553
5554 txr = container_of(ring, struct bnxt_tx_ring_info,
5555 tx_ring_struct);
bbf33d1d 5556 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
c0c050c5 5557 /* Association of transmit ring with completion ring */
9899bb59 5558 grp_info = &bp->grp_info[ring->grp_idx];
bbf33d1d
EP
5559 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5560 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5561 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5562 req->queue_id = cpu_to_le16(ring->queue_id);
c0c050c5 5563 break;
2c61d211 5564 }
c0c050c5 5565 case HWRM_RING_ALLOC_RX:
bbf33d1d
EP
5566 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5567 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
23aefdd7
MC
5568 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5569 u16 flags = 0;
5570
5571 /* Association of rx ring with stats context */
5572 grp_info = &bp->grp_info[ring->grp_idx];
bbf33d1d
EP
5573 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5574 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5575 req->enables |= cpu_to_le32(
23aefdd7
MC
5576 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5577 if (NET_IP_ALIGN == 2)
5578 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
bbf33d1d 5579 req->flags = cpu_to_le16(flags);
23aefdd7 5580 }
c0c050c5
MC
5581 break;
5582 case HWRM_RING_ALLOC_AGG:
23aefdd7 5583 if (bp->flags & BNXT_FLAG_CHIP_P5) {
bbf33d1d 5584 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
23aefdd7
MC
5585 /* Association of agg ring with rx ring */
5586 grp_info = &bp->grp_info[ring->grp_idx];
bbf33d1d
EP
5587 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5588 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5589 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5590 req->enables |= cpu_to_le32(
23aefdd7
MC
5591 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5592 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5593 } else {
bbf33d1d 5594 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
23aefdd7 5595 }
bbf33d1d 5596 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
c0c050c5
MC
5597 break;
5598 case HWRM_RING_ALLOC_CMPL:
bbf33d1d
EP
5599 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5600 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
23aefdd7
MC
5601 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5602 /* Association of cp ring with nq */
5603 grp_info = &bp->grp_info[map_index];
bbf33d1d
EP
5604 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5605 req->cq_handle = cpu_to_le64(ring->handle);
5606 req->enables |= cpu_to_le32(
23aefdd7
MC
5607 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5608 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
bbf33d1d 5609 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
23aefdd7
MC
5610 }
5611 break;
5612 case HWRM_RING_ALLOC_NQ:
bbf33d1d
EP
5613 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5614 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
c0c050c5 5615 if (bp->flags & BNXT_FLAG_USING_MSIX)
bbf33d1d 5616 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
c0c050c5
MC
5617 break;
5618 default:
5619 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5620 ring_type);
5621 return -1;
5622 }
5623
bbf33d1d
EP
5624 resp = hwrm_req_hold(bp, req);
5625 rc = hwrm_req_send(bp, req);
c0c050c5
MC
5626 err = le16_to_cpu(resp->error_code);
5627 ring_id = le16_to_cpu(resp->ring_id);
bbf33d1d 5628 hwrm_req_drop(bp, req);
c0c050c5 5629
bbf33d1d 5630exit:
c0c050c5 5631 if (rc || err) {
2727c888
MC
5632 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5633 ring_type, rc, err);
5634 return -EIO;
c0c050c5
MC
5635 }
5636 ring->fw_ring_id = ring_id;
5637 return rc;
5638}
5639
486b5c22
MC
5640static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5641{
5642 int rc;
5643
5644 if (BNXT_PF(bp)) {
bbf33d1d 5645 struct hwrm_func_cfg_input *req;
486b5c22 5646
bbf33d1d
EP
5647 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5648 if (rc)
5649 return rc;
5650
5651 req->fid = cpu_to_le16(0xffff);
5652 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5653 req->async_event_cr = cpu_to_le16(idx);
5654 return hwrm_req_send(bp, req);
486b5c22 5655 } else {
bbf33d1d
EP
5656 struct hwrm_func_vf_cfg_input *req;
5657
5658 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5659 if (rc)
5660 return rc;
486b5c22 5661
bbf33d1d 5662 req->enables =
486b5c22 5663 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
bbf33d1d
EP
5664 req->async_event_cr = cpu_to_le16(idx);
5665 return hwrm_req_send(bp, req);
486b5c22 5666 }
486b5c22
MC
5667}
5668
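/* Record the doorbell address and key for a newly allocated ring: P5
 * chips use a 64-bit doorbell at a fixed PF/VF offset keyed by ring
 * type and xid, while older chips use a 32-bit doorbell at
 * map_idx * 0x80 from BAR1.
 */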
697197e5
MC
5669static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5670 u32 map_idx, u32 xid)
5671{
5672 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5673 if (BNXT_PF(bp))
ebdf73dc 5674 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
697197e5 5675 else
ebdf73dc 5676 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
697197e5
MC
5677 switch (ring_type) {
5678 case HWRM_RING_ALLOC_TX:
5679 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5680 break;
5681 case HWRM_RING_ALLOC_RX:
5682 case HWRM_RING_ALLOC_AGG:
5683 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5684 break;
5685 case HWRM_RING_ALLOC_CMPL:
5686 db->db_key64 = DBR_PATH_L2;
5687 break;
5688 case HWRM_RING_ALLOC_NQ:
5689 db->db_key64 = DBR_PATH_L2;
5690 break;
5691 }
5692 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5693 } else {
5694 db->doorbell = bp->bar1 + map_idx * 0x80;
5695 switch (ring_type) {
5696 case HWRM_RING_ALLOC_TX:
5697 db->db_key32 = DB_KEY_TX;
5698 break;
5699 case HWRM_RING_ALLOC_RX:
5700 case HWRM_RING_ALLOC_AGG:
5701 db->db_key32 = DB_KEY_RX;
5702 break;
5703 case HWRM_RING_ALLOC_CMPL:
5704 db->db_key32 = DB_KEY_CP;
5705 break;
5706 }
5707 }
5708}
5709
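/* Allocate all firmware rings in dependency order: NQ/completion rings
 * first (the MSI-X vector is briefly disabled around the first doorbell
 * write), then TX, RX and aggregation rings.  On P5 chips each TX and
 * RX ring also gets its own child completion ring tied to the NQ.
 */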
c0c050c5
MC
5710static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5711{
e8f267b0 5712 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
c0c050c5 5713 int i, rc = 0;
697197e5 5714 u32 type;
c0c050c5 5715
23aefdd7
MC
5716 if (bp->flags & BNXT_FLAG_CHIP_P5)
5717 type = HWRM_RING_ALLOC_NQ;
5718 else
5719 type = HWRM_RING_ALLOC_CMPL;
edd0c2cc
MC
5720 for (i = 0; i < bp->cp_nr_rings; i++) {
5721 struct bnxt_napi *bnapi = bp->bnapi[i];
5722 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5723 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9899bb59 5724 u32 map_idx = ring->map_idx;
5e66e35a 5725 unsigned int vector;
c0c050c5 5726
5e66e35a
MC
5727 vector = bp->irq_tbl[map_idx].vector;
5728 disable_irq_nosync(vector);
697197e5 5729 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5e66e35a
MC
5730 if (rc) {
5731 enable_irq(vector);
edd0c2cc 5732 goto err_out;
5e66e35a 5733 }
697197e5
MC
5734 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5735 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5e66e35a 5736 enable_irq(vector);
edd0c2cc 5737 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
486b5c22
MC
5738
5739 if (!i) {
5740 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5741 if (rc)
5742 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5743 }
c0c050c5
MC
5744 }
5745
697197e5 5746 type = HWRM_RING_ALLOC_TX;
edd0c2cc 5747 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5748 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3e08b184
MC
5749 struct bnxt_ring_struct *ring;
5750 u32 map_idx;
c0c050c5 5751
3e08b184
MC
5752 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5753 struct bnxt_napi *bnapi = txr->bnapi;
5754 struct bnxt_cp_ring_info *cpr, *cpr2;
5755 u32 type2 = HWRM_RING_ALLOC_CMPL;
5756
5757 cpr = &bnapi->cp_ring;
5758 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5759 ring = &cpr2->cp_ring_struct;
5760 ring->handle = BNXT_TX_HDL;
5761 map_idx = bnapi->index;
5762 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5763 if (rc)
5764 goto err_out;
5765 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5766 ring->fw_ring_id);
5767 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5768 }
5769 ring = &txr->tx_ring_struct;
5770 map_idx = i;
697197e5 5771 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5772 if (rc)
5773 goto err_out;
697197e5 5774 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
c0c050c5
MC
5775 }
5776
697197e5 5777 type = HWRM_RING_ALLOC_RX;
edd0c2cc 5778 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5779 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5780 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3e08b184
MC
5781 struct bnxt_napi *bnapi = rxr->bnapi;
5782 u32 map_idx = bnapi->index;
c0c050c5 5783
697197e5 5784 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5785 if (rc)
5786 goto err_out;
697197e5 5787 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
e8f267b0
MC
5788 /* If we have agg rings, post agg buffers first. */
5789 if (!agg_rings)
5790 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5791 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3e08b184
MC
5792 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5793 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5794 u32 type2 = HWRM_RING_ALLOC_CMPL;
5795 struct bnxt_cp_ring_info *cpr2;
5796
5797 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5798 ring = &cpr2->cp_ring_struct;
5799 ring->handle = BNXT_RX_HDL;
5800 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5801 if (rc)
5802 goto err_out;
5803 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5804 ring->fw_ring_id);
5805 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5806 }
c0c050c5
MC
5807 }
5808
e8f267b0 5809 if (agg_rings) {
697197e5 5810 type = HWRM_RING_ALLOC_AGG;
c0c050c5 5811 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5812 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
5813 struct bnxt_ring_struct *ring =
5814 &rxr->rx_agg_ring_struct;
9899bb59 5815 u32 grp_idx = ring->grp_idx;
b81a90d3 5816 u32 map_idx = grp_idx + bp->rx_nr_rings;
c0c050c5 5817
697197e5 5818 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
c0c050c5
MC
5819 if (rc)
5820 goto err_out;
5821
697197e5
MC
5822 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5823 ring->fw_ring_id);
5824 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 5825 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5826 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
5827 }
5828 }
5829err_out:
5830 return rc;
5831}
5832
5833static int hwrm_ring_free_send_msg(struct bnxt *bp,
5834 struct bnxt_ring_struct *ring,
5835 u32 ring_type, int cmpl_ring_id)
5836{
bbf33d1d
EP
5837 struct hwrm_ring_free_output *resp;
5838 struct hwrm_ring_free_input *req;
5839 u16 error_code = 0;
c0c050c5 5840 int rc;
c0c050c5 5841
b340dc68 5842 if (BNXT_NO_FW_ACCESS(bp))
b4fff207
MC
5843 return 0;
5844
bbf33d1d
EP
5845 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
5846 if (rc)
5847 goto exit;
c0c050c5 5848
bbf33d1d
EP
5849 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
5850 req->ring_type = ring_type;
5851 req->ring_id = cpu_to_le16(ring->fw_ring_id);
c0c050c5 5852
bbf33d1d
EP
5853 resp = hwrm_req_hold(bp, req);
5854 rc = hwrm_req_send(bp, req);
5855 error_code = le16_to_cpu(resp->error_code);
5856 hwrm_req_drop(bp, req);
5857exit:
c0c050c5 5858 if (rc || error_code) {
2727c888
MC
5859 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5860 ring_type, rc, error_code);
5861 return -EIO;
c0c050c5
MC
5862 }
5863 return 0;
5864}
5865
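/* Free all firmware rings in roughly the reverse order of allocation:
 * TX, RX and aggregation rings first (flushed through their completion
 * ring when close_path is set), then interrupts are disabled and the
 * completion/NQ rings are freed last.
 */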
edd0c2cc 5866static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 5867{
23aefdd7 5868 u32 type;
edd0c2cc 5869 int i;
c0c050c5
MC
5870
5871 if (!bp->bnapi)
edd0c2cc 5872 return;
c0c050c5 5873
edd0c2cc 5874 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5875 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 5876 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
edd0c2cc
MC
5877
5878 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5879 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5880
edd0c2cc
MC
5881 hwrm_ring_free_send_msg(bp, ring,
5882 RING_FREE_REQ_RING_TYPE_TX,
5883 close_path ? cmpl_ring_id :
5884 INVALID_HW_RING_ID);
5885 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5886 }
5887 }
5888
edd0c2cc 5889 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5890 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5891 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3 5892 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5893
5894 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5895 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5896
edd0c2cc
MC
5897 hwrm_ring_free_send_msg(bp, ring,
5898 RING_FREE_REQ_RING_TYPE_RX,
5899 close_path ? cmpl_ring_id :
5900 INVALID_HW_RING_ID);
5901 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5902 bp->grp_info[grp_idx].rx_fw_ring_id =
5903 INVALID_HW_RING_ID;
c0c050c5
MC
5904 }
5905 }
5906
23aefdd7
MC
5907 if (bp->flags & BNXT_FLAG_CHIP_P5)
5908 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5909 else
5910 type = RING_FREE_REQ_RING_TYPE_RX;
edd0c2cc 5911 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5912 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5913 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
b81a90d3 5914 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5915
5916 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5917 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5918
23aefdd7 5919 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5920 close_path ? cmpl_ring_id :
5921 INVALID_HW_RING_ID);
5922 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5923 bp->grp_info[grp_idx].agg_fw_ring_id =
5924 INVALID_HW_RING_ID;
c0c050c5
MC
5925 }
5926 }
5927
9d8bc097
MC
5928 /* The completion rings are about to be freed. After that the
5929 * IRQ doorbell will not work anymore. So we need to disable
5930 * IRQ here.
5931 */
5932 bnxt_disable_int_sync(bp);
5933
23aefdd7
MC
5934 if (bp->flags & BNXT_FLAG_CHIP_P5)
5935 type = RING_FREE_REQ_RING_TYPE_NQ;
5936 else
5937 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
edd0c2cc
MC
5938 for (i = 0; i < bp->cp_nr_rings; i++) {
5939 struct bnxt_napi *bnapi = bp->bnapi[i];
5940 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3e08b184
MC
5941 struct bnxt_ring_struct *ring;
5942 int j;
edd0c2cc 5943
3e08b184
MC
5944 for (j = 0; j < 2; j++) {
5945 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5946
5947 if (cpr2) {
5948 ring = &cpr2->cp_ring_struct;
5949 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5950 continue;
5951 hwrm_ring_free_send_msg(bp, ring,
5952 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5953 INVALID_HW_RING_ID);
5954 ring->fw_ring_id = INVALID_HW_RING_ID;
5955 }
5956 }
5957 ring = &cpr->cp_ring_struct;
edd0c2cc 5958 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
23aefdd7 5959 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5960 INVALID_HW_RING_ID);
5961 ring->fw_ring_id = INVALID_HW_RING_ID;
5962 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5963 }
5964 }
c0c050c5
MC
5965}
5966
41e8d798
MC
5967static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5968 bool shared);
5969
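/* Read back the resources firmware has actually reserved for this
 * function (TX/RX rings, ring groups, completion rings, stat contexts,
 * VNICs).  On P5 chips RX/TX are trimmed to fit within the reserved
 * completion rings and the allocated MSI-X count is used for resv_irqs.
 */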
674f50a5
MC
5970static int bnxt_hwrm_get_rings(struct bnxt *bp)
5971{
674f50a5 5972 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
bbf33d1d
EP
5973 struct hwrm_func_qcfg_output *resp;
5974 struct hwrm_func_qcfg_input *req;
674f50a5
MC
5975 int rc;
5976
5977 if (bp->hwrm_spec_code < 0x10601)
5978 return 0;
5979
bbf33d1d
EP
5980 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
5981 if (rc)
5982 return rc;
5983
5984 req->fid = cpu_to_le16(0xffff);
5985 resp = hwrm_req_hold(bp, req);
5986 rc = hwrm_req_send(bp, req);
674f50a5 5987 if (rc) {
bbf33d1d 5988 hwrm_req_drop(bp, req);
d4f1420d 5989 return rc;
674f50a5
MC
5990 }
5991
5992 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
f1ca94de 5993 if (BNXT_NEW_RM(bp)) {
674f50a5
MC
5994 u16 cp, stats;
5995
5996 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5997 hw_resc->resv_hw_ring_grps =
5998 le32_to_cpu(resp->alloc_hw_ring_grps);
5999 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6000 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6001 stats = le16_to_cpu(resp->alloc_stat_ctx);
75720e63 6002 hw_resc->resv_irqs = cp;
41e8d798
MC
6003 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6004 int rx = hw_resc->resv_rx_rings;
6005 int tx = hw_resc->resv_tx_rings;
6006
6007 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6008 rx >>= 1;
6009 if (cp < (rx + tx)) {
6010 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6011 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6012 rx <<= 1;
6013 hw_resc->resv_rx_rings = rx;
6014 hw_resc->resv_tx_rings = tx;
6015 }
75720e63 6016 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
41e8d798
MC
6017 hw_resc->resv_hw_ring_grps = rx;
6018 }
674f50a5 6019 hw_resc->resv_cp_rings = cp;
780baad4 6020 hw_resc->resv_stat_ctxs = stats;
674f50a5 6021 }
bbf33d1d 6022 hwrm_req_drop(bp, req);
674f50a5
MC
6023 return 0;
6024}
6025
391be5c2
MC
6026int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6027{
bbf33d1d
EP
6028 struct hwrm_func_qcfg_output *resp;
6029 struct hwrm_func_qcfg_input *req;
391be5c2
MC
6030 int rc;
6031
6032 if (bp->hwrm_spec_code < 0x10601)
6033 return 0;
6034
bbf33d1d
EP
6035 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6036 if (rc)
6037 return rc;
6038
6039 req->fid = cpu_to_le16(fid);
6040 resp = hwrm_req_hold(bp, req);
6041 rc = hwrm_req_send(bp, req);
391be5c2
MC
6042 if (!rc)
6043 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6044
bbf33d1d 6045 hwrm_req_drop(bp, req);
391be5c2
MC
6046 return rc;
6047}
6048
41e8d798
MC
6049static bool bnxt_rfs_supported(struct bnxt *bp);
6050
bbf33d1d
EP
6051static struct hwrm_func_cfg_input *
6052__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6053 int ring_grps, int cp_rings, int stats, int vnics)
391be5c2 6054{
bbf33d1d 6055 struct hwrm_func_cfg_input *req;
674f50a5 6056 u32 enables = 0;
391be5c2 6057
bbf33d1d
EP
6058 if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6059 return NULL;
6060
4ed50ef4 6061 req->fid = cpu_to_le16(0xffff);
674f50a5 6062 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4ed50ef4 6063 req->num_tx_rings = cpu_to_le16(tx_rings);
f1ca94de 6064 if (BNXT_NEW_RM(bp)) {
674f50a5 6065 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
3f93cd3f 6066 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
6067 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6068 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6069 enables |= tx_rings + ring_grps ?
3f93cd3f 6070 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6071 enables |= rx_rings ?
6072 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6073 } else {
6074 enables |= cp_rings ?
3f93cd3f 6075 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6076 enables |= ring_grps ?
6077 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6078 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6079 }
dbe80d44 6080 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
674f50a5 6081
4ed50ef4 6082 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
6083 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6084 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6085 req->num_msix = cpu_to_le16(cp_rings);
6086 req->num_rsscos_ctxs =
6087 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6088 } else {
6089 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6090 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6091 req->num_rsscos_ctxs = cpu_to_le16(1);
6092 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6093 bnxt_rfs_supported(bp))
6094 req->num_rsscos_ctxs =
6095 cpu_to_le16(ring_grps + 1);
6096 }
780baad4 6097 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4 6098 req->num_vnics = cpu_to_le16(vnics);
674f50a5 6099 }
4ed50ef4 6100 req->enables = cpu_to_le32(enables);
bbf33d1d 6101 return req;
4ed50ef4
MC
6102}
6103
bbf33d1d
EP
6104static struct hwrm_func_vf_cfg_input *
6105__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6106 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4 6107{
bbf33d1d 6108 struct hwrm_func_vf_cfg_input *req;
4ed50ef4
MC
6109 u32 enables = 0;
6110
bbf33d1d
EP
6111 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6112 return NULL;
6113
4ed50ef4 6114 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
41e8d798
MC
6115 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6116 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
3f93cd3f 6117 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
6118 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6119 enables |= tx_rings + ring_grps ?
3f93cd3f 6120 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6121 } else {
6122 enables |= cp_rings ?
3f93cd3f 6123 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6124 enables |= ring_grps ?
6125 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6126 }
4ed50ef4 6127 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
41e8d798 6128 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
4ed50ef4 6129
41e8d798 6130 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
4ed50ef4
MC
6131 req->num_tx_rings = cpu_to_le16(tx_rings);
6132 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
6133 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6134 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6135 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6136 } else {
6137 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6138 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6139 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6140 }
780baad4 6141 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4
MC
6142 req->num_vnics = cpu_to_le16(vnics);
6143
6144 req->enables = cpu_to_le32(enables);
bbf33d1d 6145 return req;
4ed50ef4
MC
6146}
6147
6148static int
6149bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 6150 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4 6151{
bbf33d1d 6152 struct hwrm_func_cfg_input *req;
4ed50ef4
MC
6153 int rc;
6154
bbf33d1d
EP
6155 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6156 cp_rings, stats, vnics);
6157 if (!req)
6158 return -ENOMEM;
6159
6160 if (!req->enables) {
6161 hwrm_req_drop(bp, req);
391be5c2 6162 return 0;
bbf33d1d 6163 }
391be5c2 6164
bbf33d1d 6165 rc = hwrm_req_send(bp, req);
674f50a5 6166 if (rc)
d4f1420d 6167 return rc;
674f50a5
MC
6168
6169 if (bp->hwrm_spec_code < 0x10601)
6170 bp->hw_resc.resv_tx_rings = tx_rings;
6171
9f90445c 6172 return bnxt_hwrm_get_rings(bp);
674f50a5
MC
6173}
6174
6175static int
6176bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 6177 int ring_grps, int cp_rings, int stats, int vnics)
674f50a5 6178{
bbf33d1d 6179 struct hwrm_func_vf_cfg_input *req;
674f50a5
MC
6180 int rc;
6181
f1ca94de 6182 if (!BNXT_NEW_RM(bp)) {
674f50a5 6183 bp->hw_resc.resv_tx_rings = tx_rings;
391be5c2 6184 return 0;
674f50a5 6185 }
391be5c2 6186
bbf33d1d
EP
6187 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6188 cp_rings, stats, vnics);
6189 if (!req)
6190 return -ENOMEM;
6191
6192 rc = hwrm_req_send(bp, req);
674f50a5 6193 if (rc)
d4f1420d 6194 return rc;
674f50a5 6195
9f90445c 6196 return bnxt_hwrm_get_rings(bp);
674f50a5
MC
6197}
6198
6199static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
780baad4 6200 int cp, int stat, int vnic)
674f50a5
MC
6201{
6202 if (BNXT_PF(bp))
780baad4
VV
6203 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6204 vnic);
674f50a5 6205 else
780baad4
VV
6206 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6207 vnic);
674f50a5
MC
6208}
6209
b16b6891 6210int bnxt_nq_rings_in_use(struct bnxt *bp)
08654eb2
MC
6211{
6212 int cp = bp->cp_nr_rings;
6213 int ulp_msix, ulp_base;
6214
6215 ulp_msix = bnxt_get_ulp_msix_num(bp);
6216 if (ulp_msix) {
6217 ulp_base = bnxt_get_ulp_msix_base(bp);
6218 cp += ulp_msix;
6219 if ((ulp_base + ulp_msix) > cp)
6220 cp = ulp_base + ulp_msix;
6221 }
6222 return cp;
6223}
6224
c0b8cda0
MC
6225static int bnxt_cp_rings_in_use(struct bnxt *bp)
6226{
6227 int cp;
6228
6229 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6230 return bnxt_nq_rings_in_use(bp);
6231
6232 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6233 return cp;
6234}
6235
780baad4
VV
6236static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6237{
d77b1ad8
MC
6238 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6239 int cp = bp->cp_nr_rings;
6240
6241 if (!ulp_stat)
6242 return cp;
6243
6244 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6245 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6246
6247 return cp + ulp_stat;
780baad4
VV
6248}
6249
b43b9f53
MC
6250/* Check if a default RSS map needs to be set up. This function is only
6251 * used on older firmware that does not require reserving RX rings.
6252 */
6253static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6254{
6255 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6256
6257 /* The RSS map is valid for RX rings set to resv_rx_rings */
6258 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6259 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6260 if (!netif_is_rxfh_configured(bp->dev))
6261 bnxt_set_dflt_rss_indir_tbl(bp);
6262 }
6263}
6264
4e41dc5d
MC
6265static bool bnxt_need_reserve_rings(struct bnxt *bp)
6266{
6267 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
fbcfc8e4 6268 int cp = bnxt_cp_rings_in_use(bp);
c0b8cda0 6269 int nq = bnxt_nq_rings_in_use(bp);
780baad4 6270 int rx = bp->rx_nr_rings, stat;
4e41dc5d
MC
6271 int vnic = 1, grp = rx;
6272
b43b9f53
MC
6273 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6274 bp->hwrm_spec_code >= 0x10601)
4e41dc5d
MC
6275 return true;
6276
b43b9f53
MC
6277 /* Old firmware does not need RX ring reservations but we still
6278	 * need to set up a default RSS map when needed. With new firmware
6279 * we go through RX ring reservations first and then set up the
6280 * RSS map for the successfully reserved RX rings when needed.
6281 */
6282 if (!BNXT_NEW_RM(bp)) {
6283 bnxt_check_rss_tbl_no_rmgr(bp);
6284 return false;
6285 }
41e8d798 6286 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
4e41dc5d
MC
6287 vnic = rx + 1;
6288 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6289 rx <<= 1;
780baad4 6290 stat = bnxt_get_func_stat_ctxs(bp);
b43b9f53
MC
6291 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6292 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6293 (hw_resc->resv_hw_ring_grps != grp &&
6294 !(bp->flags & BNXT_FLAG_CHIP_P5)))
4e41dc5d 6295 return true;
01989c6b
MC
6296 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6297 hw_resc->resv_irqs != nq)
6298 return true;
4e41dc5d
MC
6299 return false;
6300}
6301
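/* Reserve rings with firmware and adapt the driver to what was granted:
 * aggregation rings (and LRO) may be dropped if too few RX rings were
 * reserved, ring counts are trimmed to the reserved completion and stat
 * contexts, and the RSS indirection table is reset to default when the
 * user has not configured one.
 */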
674f50a5
MC
6302static int __bnxt_reserve_rings(struct bnxt *bp)
6303{
6304 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
c0b8cda0 6305 int cp = bnxt_nq_rings_in_use(bp);
674f50a5
MC
6306 int tx = bp->tx_nr_rings;
6307 int rx = bp->rx_nr_rings;
674f50a5 6308 int grp, rx_rings, rc;
780baad4 6309 int vnic = 1, stat;
674f50a5 6310 bool sh = false;
674f50a5 6311
4e41dc5d 6312 if (!bnxt_need_reserve_rings(bp))
674f50a5
MC
6313 return 0;
6314
6315 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6316 sh = true;
41e8d798 6317 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
674f50a5
MC
6318 vnic = rx + 1;
6319 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6320 rx <<= 1;
674f50a5 6321 grp = bp->rx_nr_rings;
780baad4 6322 stat = bnxt_get_func_stat_ctxs(bp);
674f50a5 6323
780baad4 6324 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
391be5c2
MC
6325 if (rc)
6326 return rc;
6327
674f50a5 6328 tx = hw_resc->resv_tx_rings;
f1ca94de 6329 if (BNXT_NEW_RM(bp)) {
674f50a5 6330 rx = hw_resc->resv_rx_rings;
c0b8cda0 6331 cp = hw_resc->resv_irqs;
674f50a5
MC
6332 grp = hw_resc->resv_hw_ring_grps;
6333 vnic = hw_resc->resv_vnics;
780baad4 6334 stat = hw_resc->resv_stat_ctxs;
674f50a5
MC
6335 }
6336
6337 rx_rings = rx;
6338 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6339 if (rx >= 2) {
6340 rx_rings = rx >> 1;
6341 } else {
6342 if (netif_running(bp->dev))
6343 return -ENOMEM;
6344
6345 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6346 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6347 bp->dev->hw_features &= ~NETIF_F_LRO;
6348 bp->dev->features &= ~NETIF_F_LRO;
6349 bnxt_set_ring_params(bp);
6350 }
6351 }
6352 rx_rings = min_t(int, rx_rings, grp);
780baad4
VV
6353 cp = min_t(int, cp, bp->cp_nr_rings);
6354 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6355 stat -= bnxt_get_ulp_stat_ctxs(bp);
6356 cp = min_t(int, cp, stat);
674f50a5
MC
6357 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6358 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6359 rx = rx_rings << 1;
6360 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6361 bp->tx_nr_rings = tx;
bd3191b5
MC
6362
6363 /* If we cannot reserve all the RX rings, reset the RSS map only
6364 * if absolutely necessary
6365 */
6366 if (rx_rings != bp->rx_nr_rings) {
6367 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6368 rx_rings, bp->rx_nr_rings);
6369 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6370 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6371 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6372 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6373 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6374 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6375 }
6376 }
674f50a5
MC
6377 bp->rx_nr_rings = rx_rings;
6378 bp->cp_nr_rings = cp;
6379
780baad4 6380 if (!tx || !rx || !cp || !grp || !vnic || !stat)
674f50a5
MC
6381 return -ENOMEM;
6382
5fa65524
EP
6383 if (!netif_is_rxfh_configured(bp->dev))
6384 bnxt_set_dflt_rss_indir_tbl(bp);
6385
391be5c2
MC
6386 return rc;
6387}
6388
8f23d638 6389static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6390 int ring_grps, int cp_rings, int stats,
6391 int vnics)
98fdbe73 6392{
bbf33d1d 6393 struct hwrm_func_vf_cfg_input *req;
6fc2ffdf 6394 u32 flags;
98fdbe73 6395
f1ca94de 6396 if (!BNXT_NEW_RM(bp))
98fdbe73
MC
6397 return 0;
6398
bbf33d1d
EP
6399 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6400 cp_rings, stats, vnics);
8f23d638
MC
6401 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6402 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6403 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638 6404 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
41e8d798
MC
6405 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6406 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6407 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6408 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8f23d638 6409
bbf33d1d
EP
6410 req->flags = cpu_to_le32(flags);
6411 return hwrm_req_send_silent(bp, req);
8f23d638
MC
6412}
6413
6414static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6415 int ring_grps, int cp_rings, int stats,
6416 int vnics)
8f23d638 6417{
bbf33d1d 6418 struct hwrm_func_cfg_input *req;
6fc2ffdf 6419 u32 flags;
98fdbe73 6420
bbf33d1d
EP
6421 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6422 cp_rings, stats, vnics);
8f23d638 6423 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
41e8d798 6424 if (BNXT_NEW_RM(bp)) {
8f23d638
MC
6425 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6426 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638
MC
6427 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6428 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
41e8d798 6429 if (bp->flags & BNXT_FLAG_CHIP_P5)
0b815023
MC
6430 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6431 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
41e8d798
MC
6432 else
6433 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6434 }
6fc2ffdf 6435
bbf33d1d
EP
6436 req->flags = cpu_to_le32(flags);
6437 return hwrm_req_send_silent(bp, req);
98fdbe73
MC
6438}
6439
8f23d638 6440static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6441 int ring_grps, int cp_rings, int stats,
6442 int vnics)
8f23d638
MC
6443{
6444 if (bp->hwrm_spec_code < 0x10801)
6445 return 0;
6446
6447 if (BNXT_PF(bp))
6448 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
780baad4
VV
6449 ring_grps, cp_rings, stats,
6450 vnics);
8f23d638
MC
6451
6452 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
780baad4 6453 cp_rings, stats, vnics);
8f23d638
MC
6454}
6455
74706afa
MC
6456static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6457{
74706afa 6458 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
bbf33d1d
EP
6459 struct hwrm_ring_aggint_qcaps_output *resp;
6460 struct hwrm_ring_aggint_qcaps_input *req;
74706afa
MC
6461 int rc;
6462
6463 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6464 coal_cap->num_cmpl_dma_aggr_max = 63;
6465 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6466 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6467 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6468 coal_cap->int_lat_tmr_min_max = 65535;
6469 coal_cap->int_lat_tmr_max_max = 65535;
6470 coal_cap->num_cmpl_aggr_int_max = 65535;
6471 coal_cap->timer_units = 80;
6472
6473 if (bp->hwrm_spec_code < 0x10902)
6474 return;
6475
bbf33d1d
EP
6476 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6477 return;
6478
6479 resp = hwrm_req_hold(bp, req);
6480 rc = hwrm_req_send_silent(bp, req);
74706afa
MC
6481 if (!rc) {
6482 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
58590c8d 6483 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
74706afa
MC
6484 coal_cap->num_cmpl_dma_aggr_max =
6485 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6486 coal_cap->num_cmpl_dma_aggr_during_int_max =
6487 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6488 coal_cap->cmpl_aggr_dma_tmr_max =
6489 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6490 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6491 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6492 coal_cap->int_lat_tmr_min_max =
6493 le16_to_cpu(resp->int_lat_tmr_min_max);
6494 coal_cap->int_lat_tmr_max_max =
6495 le16_to_cpu(resp->int_lat_tmr_max_max);
6496 coal_cap->num_cmpl_aggr_int_max =
6497 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6498 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6499 }
bbf33d1d 6500 hwrm_req_drop(bp, req);
74706afa
MC
6501}
6502
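/* Convert a coalescing time in microseconds to device timer ticks;
 * coal_cap->timer_units is the tick period in nanoseconds (80 ns by
 * default, or as reported by RING_AGGINT_QCAPS).
 */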
6503static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6504{
6505 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6506
6507 return usec * 1000 / coal_cap->timer_units;
6508}
6509
6510static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6511 struct bnxt_coal *hw_coal,
bb053f52
MC
6512 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6513{
74706afa
MC
6514 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6515 u32 cmpl_params = coal_cap->cmpl_params;
6516 u16 val, tmr, max, flags = 0;
f8503969
MC
6517
6518 max = hw_coal->bufs_per_record * 128;
6519 if (hw_coal->budget)
6520 max = hw_coal->bufs_per_record * hw_coal->budget;
74706afa 6521 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
f8503969
MC
6522
6523 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6524 req->num_cmpl_aggr_int = cpu_to_le16(val);
b153cbc5 6525
74706afa 6526 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
f8503969
MC
6527 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6528
74706afa
MC
6529 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6530 coal_cap->num_cmpl_dma_aggr_during_int_max);
f8503969
MC
6531 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6532
74706afa
MC
6533 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6534 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
f8503969
MC
6535 req->int_lat_tmr_max = cpu_to_le16(tmr);
6536
6537 /* min timer set to 1/2 of interrupt timer */
74706afa
MC
6538 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6539 val = tmr / 2;
6540 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6541 req->int_lat_tmr_min = cpu_to_le16(val);
6542 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6543 }
f8503969
MC
6544
6545 /* buf timer set to 1/4 of interrupt timer */
74706afa 6546 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
f8503969
MC
6547 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6548
74706afa
MC
6549 if (cmpl_params &
6550 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6551 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6552 val = clamp_t(u16, tmr, 1,
6553 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6adc4601 6554 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
74706afa
MC
6555 req->enables |=
6556 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6557 }
f8503969 6558
74706afa
MC
6559 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6560 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6561 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6562 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
f8503969 6563 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
bb053f52 6564 req->flags = cpu_to_le16(flags);
74706afa 6565 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
bb053f52
MC
6566}
6567
58590c8d
MC
6568static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6569 struct bnxt_coal *hw_coal)
6570{
bbf33d1d 6571 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
58590c8d
MC
6572 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6573 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6574 u32 nq_params = coal_cap->nq_params;
6575 u16 tmr;
bbf33d1d 6576 int rc;
58590c8d
MC
6577
6578 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6579 return 0;
6580
bbf33d1d
EP
6581 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6582 if (rc)
6583 return rc;
6584
6585 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6586 req->flags =
58590c8d
MC
6587 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6588
6589 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6590 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
bbf33d1d
EP
6591 req->int_lat_tmr_min = cpu_to_le16(tmr);
6592 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6593 return hwrm_req_send(bp, req);
58590c8d
MC
6594}
6595
6a8788f2
AG
6596int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6597{
bbf33d1d 6598 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6a8788f2
AG
6599 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6600 struct bnxt_coal coal;
bbf33d1d 6601 int rc;
6a8788f2
AG
6602
6603	/* Tick values in microseconds.
6604 * 1 coal_buf x bufs_per_record = 1 completion record.
6605 */
6606 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6607
6608 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6609 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6610
6611 if (!bnapi->rx_ring)
6612 return -ENODEV;
6613
bbf33d1d
EP
6614 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6615 if (rc)
6616 return rc;
6a8788f2 6617
bbf33d1d 6618 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6a8788f2 6619
bbf33d1d 6620 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6a8788f2 6621
bbf33d1d 6622 return hwrm_req_send(bp, req_rx);
6a8788f2
AG
6623}
6624
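/* Apply the RX and TX interrupt coalescing parameters to every
 * completion ring.  On P5 chips a ring that handles both RX and TX gets
 * the TX parameters applied to its TX completion ring as well, and the
 * NQ minimum timer is programmed via __bnxt_hwrm_set_coal_nq().
 */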
c0c050c5
MC
6625int bnxt_hwrm_set_coal(struct bnxt *bp)
6626{
bbf33d1d
EP
6627 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6628 *req;
6629 int i, rc;
6630
6631 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6632 if (rc)
6633 return rc;
c0c050c5 6634
bbf33d1d
EP
6635 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6636 if (rc) {
6637 hwrm_req_drop(bp, req_rx);
6638 return rc;
6639 }
c0c050c5 6640
bbf33d1d
EP
6641 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6642 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
c0c050c5 6643
bbf33d1d
EP
6644 hwrm_req_hold(bp, req_rx);
6645 hwrm_req_hold(bp, req_tx);
c0c050c5 6646 for (i = 0; i < bp->cp_nr_rings; i++) {
dfc9c94a 6647 struct bnxt_napi *bnapi = bp->bnapi[i];
58590c8d 6648 struct bnxt_coal *hw_coal;
2c61d211 6649 u16 ring_id;
c0c050c5 6650
bbf33d1d 6651 req = req_rx;
2c61d211
MC
6652 if (!bnapi->rx_ring) {
6653 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
bbf33d1d 6654 req = req_tx;
2c61d211
MC
6655 } else {
6656 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6657 }
6658 req->ring_id = cpu_to_le16(ring_id);
dfc9c94a 6659
bbf33d1d 6660 rc = hwrm_req_send(bp, req);
c0c050c5
MC
6661 if (rc)
6662 break;
58590c8d
MC
6663
6664 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6665 continue;
6666
6667 if (bnapi->rx_ring && bnapi->tx_ring) {
bbf33d1d 6668 req = req_tx;
58590c8d
MC
6669 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6670 req->ring_id = cpu_to_le16(ring_id);
bbf33d1d 6671 rc = hwrm_req_send(bp, req);
58590c8d
MC
6672 if (rc)
6673 break;
6674 }
6675 if (bnapi->rx_ring)
6676 hw_coal = &bp->rx_coal;
6677 else
6678 hw_coal = &bp->tx_coal;
6679 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
c0c050c5 6680 }
bbf33d1d
EP
6681 hwrm_req_drop(bp, req_rx);
6682 hwrm_req_drop(bp, req_tx);
c0c050c5
MC
6683 return rc;
6684}
6685
3d061591 6686static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
c0c050c5 6687{
bbf33d1d
EP
6688 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6689 struct hwrm_stat_ctx_free_input *req;
3d061591 6690 int i;
c0c050c5
MC
6691
6692 if (!bp->bnapi)
3d061591 6693 return;
c0c050c5 6694
3e8060fa 6695 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3d061591 6696 return;
3e8060fa 6697
bbf33d1d
EP
6698 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6699 return;
6700 if (BNXT_FW_MAJ(bp) <= 20) {
6701 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6702 hwrm_req_drop(bp, req);
6703 return;
6704 }
6705 hwrm_req_hold(bp, req0);
6706 }
6707 hwrm_req_hold(bp, req);
c0c050c5
MC
6708 for (i = 0; i < bp->cp_nr_rings; i++) {
6709 struct bnxt_napi *bnapi = bp->bnapi[i];
6710 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6711
6712 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
bbf33d1d
EP
6713 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6714 if (req0) {
6715 req0->stat_ctx_id = req->stat_ctx_id;
6716 hwrm_req_send(bp, req0);
c2dec363 6717 }
bbf33d1d 6718 hwrm_req_send(bp, req);
c0c050c5
MC
6719
6720 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6721 }
6722 }
bbf33d1d
EP
6723 hwrm_req_drop(bp, req);
6724 if (req0)
6725 hwrm_req_drop(bp, req0);
c0c050c5
MC
6726}
6727
6728static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6729{
bbf33d1d
EP
6730 struct hwrm_stat_ctx_alloc_output *resp;
6731 struct hwrm_stat_ctx_alloc_input *req;
6732 int rc, i;
c0c050c5 6733
3e8060fa
PS
6734 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6735 return 0;
6736
bbf33d1d
EP
6737 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6738 if (rc)
6739 return rc;
c0c050c5 6740
bbf33d1d
EP
6741 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6742 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
c0c050c5 6743
bbf33d1d 6744 resp = hwrm_req_hold(bp, req);
c0c050c5
MC
6745 for (i = 0; i < bp->cp_nr_rings; i++) {
6746 struct bnxt_napi *bnapi = bp->bnapi[i];
6747 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6748
bbf33d1d 6749 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
c0c050c5 6750
bbf33d1d 6751 rc = hwrm_req_send(bp, req);
c0c050c5
MC
6752 if (rc)
6753 break;
6754
6755 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6756
6757 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6758 }
bbf33d1d 6759 hwrm_req_drop(bp, req);
89aa8445 6760 return rc;
c0c050c5
MC
6761}
6762
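/* Query this function's configuration: VF default VLAN, DCBX/LLDP agent
 * and multi-host flags, NPAR partition type, bridge (EVB) mode, the
 * maximum configured MTU and the size of the L2 doorbell BAR.
 */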
cf6645f8
MC
6763static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6764{
bbf33d1d
EP
6765 struct hwrm_func_qcfg_output *resp;
6766 struct hwrm_func_qcfg_input *req;
8ae24738 6767 u32 min_db_offset = 0;
9315edca 6768 u16 flags;
cf6645f8
MC
6769 int rc;
6770
bbf33d1d
EP
6771 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6772 if (rc)
6773 return rc;
6774
6775 req->fid = cpu_to_le16(0xffff);
6776 resp = hwrm_req_hold(bp, req);
6777 rc = hwrm_req_send(bp, req);
cf6645f8
MC
6778 if (rc)
6779 goto func_qcfg_exit;
6780
6781#ifdef CONFIG_BNXT_SRIOV
6782 if (BNXT_VF(bp)) {
cf6645f8
MC
6783 struct bnxt_vf_info *vf = &bp->vf;
6784
6785 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
230d1f0d
MC
6786 } else {
6787 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
cf6645f8
MC
6788 }
6789#endif
9315edca
MC
6790 flags = le16_to_cpu(resp->flags);
6791 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6792 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
97381a18 6793 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
9315edca 6794 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
97381a18 6795 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
9315edca
MC
6796 }
6797 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6798 bp->flags |= BNXT_FLAG_MULTI_HOST;
8d4bd96b
MC
6799 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6800 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
bc39f885 6801
567b2abe
SB
6802 switch (resp->port_partition_type) {
6803 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6804 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6805 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6806 bp->port_partition_type = resp->port_partition_type;
6807 break;
6808 }
32e8239c
MC
6809 if (bp->hwrm_spec_code < 0x10707 ||
6810 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6811 bp->br_mode = BRIDGE_MODE_VEB;
6812 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6813 bp->br_mode = BRIDGE_MODE_VEPA;
6814 else
6815 bp->br_mode = BRIDGE_MODE_UNDEF;
cf6645f8 6816
7eb9bb3a
MC
6817 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6818 if (!bp->max_mtu)
6819 bp->max_mtu = BNXT_MAX_MTU;
6820
8ae24738
MC
6821 if (bp->db_size)
6822 goto func_qcfg_exit;
6823
6824 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6825 if (BNXT_PF(bp))
6826 min_db_offset = DB_PF_OFFSET_P5;
6827 else
6828 min_db_offset = DB_VF_OFFSET_P5;
6829 }
6830 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6831 1024);
6832 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6833 bp->db_size <= min_db_offset)
6834 bp->db_size = pci_resource_len(bp->pdev, 2);
6835
cf6645f8 6836func_qcfg_exit:
bbf33d1d 6837 hwrm_req_drop(bp, req);
cf6645f8
MC
6838 return rc;
6839}
6840
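/* Save the context memory init value and the per-type init offsets advertised
 * by firmware so that backing store pages can be pre-initialized correctly.
 */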
e9696ff3
MC
6841static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6842 struct hwrm_func_backing_store_qcaps_output *resp)
6843{
6844 struct bnxt_mem_init *mem_init;
41435c39 6845 u16 init_mask;
e9696ff3 6846 u8 init_val;
41435c39 6847 u8 *offset;
e9696ff3
MC
6848 int i;
6849
6850 init_val = resp->ctx_kind_initializer;
41435c39
MC
6851 init_mask = le16_to_cpu(resp->ctx_init_mask);
6852 offset = &resp->qp_init_offset;
6853 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6854 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
e9696ff3 6855 mem_init->init_val = init_val;
41435c39
MC
6856 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6857 if (!init_mask)
6858 continue;
6859 if (i == BNXT_CTX_MEM_INIT_STAT)
6860 offset = &resp->stat_init_offset;
6861 if (init_mask & (1 << i))
6862 mem_init->offset = *offset * 4;
6863 else
6864 mem_init->init_val = 0;
6865 }
6866 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6867 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6868 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6869 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6870 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6871 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
e9696ff3
MC
6872}
6873
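/* Ask the firmware how much host backing store it needs for each context
 * type (QP, SRQ, CQ, VNIC, STAT, TQM, MRAV, TIM) and allocate the tracking
 * structures in bp->ctx.  Older firmware without this command is tolerated.
 */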
98f04cf0
MC
6874static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6875{
bbf33d1d
EP
6876 struct hwrm_func_backing_store_qcaps_output *resp;
6877 struct hwrm_func_backing_store_qcaps_input *req;
98f04cf0
MC
6878 int rc;
6879
6880 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6881 return 0;
6882
bbf33d1d
EP
6883 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
6884 if (rc)
6885 return rc;
6886
6887 resp = hwrm_req_hold(bp, req);
6888 rc = hwrm_req_send_silent(bp, req);
98f04cf0
MC
6889 if (!rc) {
6890 struct bnxt_ctx_pg_info *ctx_pg;
6891 struct bnxt_ctx_mem_info *ctx;
ac3158cb 6892 int i, tqm_rings;
98f04cf0
MC
6893
6894 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6895 if (!ctx) {
6896 rc = -ENOMEM;
6897 goto ctx_err;
6898 }
98f04cf0
MC
6899 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6900 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6901 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6902 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6903 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6904 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6905 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6906 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6907 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6908 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6909 ctx->vnic_max_vnic_entries =
6910 le16_to_cpu(resp->vnic_max_vnic_entries);
6911 ctx->vnic_max_ring_table_entries =
6912 le16_to_cpu(resp->vnic_max_ring_table_entries);
6913 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6914 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6915 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6916 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6917 ctx->tqm_min_entries_per_ring =
6918 le32_to_cpu(resp->tqm_min_entries_per_ring);
6919 ctx->tqm_max_entries_per_ring =
6920 le32_to_cpu(resp->tqm_max_entries_per_ring);
6921 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6922 if (!ctx->tqm_entries_multiple)
6923 ctx->tqm_entries_multiple = 1;
6924 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6925 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
53579e37
DS
6926 ctx->mrav_num_entries_units =
6927 le16_to_cpu(resp->mrav_num_entries_units);
98f04cf0
MC
6928 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6929 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
e9696ff3
MC
6930
6931 bnxt_init_ctx_initializer(ctx, resp);
6932
ac3158cb
MC
6933 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6934 if (!ctx->tqm_fp_rings_count)
6935 ctx->tqm_fp_rings_count = bp->max_q;
a029a2fe
MC
6936 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6937 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
ac3158cb 6938
a029a2fe 6939 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ac3158cb
MC
6940 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6941 if (!ctx_pg) {
6942 kfree(ctx);
6943 rc = -ENOMEM;
6944 goto ctx_err;
6945 }
6946 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6947 ctx->tqm_mem[i] = ctx_pg;
6948 bp->ctx = ctx;
98f04cf0
MC
6949 } else {
6950 rc = 0;
6951 }
6952ctx_err:
bbf33d1d 6953 hwrm_req_drop(bp, req);
98f04cf0
MC
6954 return rc;
6955}
6956
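/* Encode the page size and indirection depth of a ring memory block into the
 * request attribute byte and point the page directory at either the page
 * table (depth >= 1) or the single data page.
 */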
1b9394e5
MC
6957static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6958 __le64 *pg_dir)
6959{
be6d755f
EP
6960 if (!rmem->nr_pages)
6961 return;
6962
702279d2 6963 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
08fe9d18
MC
6964 if (rmem->depth >= 1) {
6965 if (rmem->depth == 2)
6966 *pg_attr |= 2;
6967 else
6968 *pg_attr |= 1;
1b9394e5
MC
6969 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6970 } else {
6971 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6972 }
6973}
6974
6975#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6976 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6977 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6978 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6979 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6980 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6981
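/* Program the firmware with the location of every enabled backing store
 * region: entry counts, entry sizes and page directories for each context
 * type, including the per-ring TQM regions.
 */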
6982static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6983{
bbf33d1d 6984 struct hwrm_func_backing_store_cfg_input *req;
1b9394e5
MC
6985 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6986 struct bnxt_ctx_pg_info *ctx_pg;
bbf33d1d
EP
6987 void **__req = (void **)&req;
6988 u32 req_len = sizeof(*req);
1b9394e5
MC
6989 __le32 *num_entries;
6990 __le64 *pg_dir;
53579e37 6991 u32 flags = 0;
1b9394e5 6992 u8 *pg_attr;
1b9394e5 6993 u32 ena;
bbf33d1d 6994 int rc;
9f90445c 6995 int i;
1b9394e5
MC
6996
6997 if (!ctx)
6998 return 0;
6999
16db6323
MC
7000 if (req_len > bp->hwrm_max_ext_req_len)
7001 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
bbf33d1d
EP
7002 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
7003 if (rc)
7004 return rc;
1b9394e5 7005
bbf33d1d 7006 req->enables = cpu_to_le32(enables);
1b9394e5
MC
7007 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7008 ctx_pg = &ctx->qp_mem;
bbf33d1d
EP
7009 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
7010 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7011 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7012 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
1b9394e5 7013 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7014 &req->qpc_pg_size_qpc_lvl,
7015 &req->qpc_page_dir);
1b9394e5
MC
7016 }
7017 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7018 ctx_pg = &ctx->srq_mem;
bbf33d1d
EP
7019 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
7020 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7021 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
1b9394e5 7022 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7023 &req->srq_pg_size_srq_lvl,
7024 &req->srq_page_dir);
1b9394e5
MC
7025 }
7026 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7027 ctx_pg = &ctx->cq_mem;
bbf33d1d
EP
7028 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
7029 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7030 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7031 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7032 &req->cq_pg_size_cq_lvl,
7033 &req->cq_page_dir);
1b9394e5
MC
7034 }
7035 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7036 ctx_pg = &ctx->vnic_mem;
bbf33d1d 7037 req->vnic_num_vnic_entries =
1b9394e5 7038 cpu_to_le16(ctx->vnic_max_vnic_entries);
bbf33d1d 7039 req->vnic_num_ring_table_entries =
1b9394e5 7040 cpu_to_le16(ctx->vnic_max_ring_table_entries);
bbf33d1d 7041 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
1b9394e5 7042 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7043 &req->vnic_pg_size_vnic_lvl,
7044 &req->vnic_page_dir);
1b9394e5
MC
7045 }
7046 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7047 ctx_pg = &ctx->stat_mem;
bbf33d1d
EP
7048 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7049 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
1b9394e5 7050 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7051 &req->stat_pg_size_stat_lvl,
7052 &req->stat_page_dir);
1b9394e5 7053 }
cf6daed0
MC
7054 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7055 ctx_pg = &ctx->mrav_mem;
bbf33d1d 7056 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
53579e37
DS
7057 if (ctx->mrav_num_entries_units)
7058 flags |=
7059 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
bbf33d1d 7060 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
cf6daed0 7061 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7062 &req->mrav_pg_size_mrav_lvl,
7063 &req->mrav_page_dir);
cf6daed0
MC
7064 }
7065 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7066 ctx_pg = &ctx->tim_mem;
bbf33d1d
EP
7067 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7068 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
cf6daed0 7069 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7070 &req->tim_pg_size_tim_lvl,
7071 &req->tim_page_dir);
cf6daed0 7072 }
bbf33d1d
EP
7073 for (i = 0, num_entries = &req->tqm_sp_num_entries,
7074 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7075 pg_dir = &req->tqm_sp_page_dir,
1b9394e5 7076 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
a029a2fe
MC
7077 i < BNXT_MAX_TQM_RINGS;
7078 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
1b9394e5
MC
7079 if (!(enables & ena))
7080 continue;
7081
bbf33d1d 7082 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
1b9394e5
MC
7083 ctx_pg = ctx->tqm_mem[i];
7084 *num_entries = cpu_to_le32(ctx_pg->entries);
7085 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7086 }
bbf33d1d
EP
7087 req->flags = cpu_to_le32(flags);
7088 return hwrm_req_send(bp, req);
1b9394e5
MC
7089}
7090
98f04cf0 7091static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
08fe9d18 7092 struct bnxt_ctx_pg_info *ctx_pg)
98f04cf0
MC
7093{
7094 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7095
98f04cf0
MC
7096 rmem->page_size = BNXT_PAGE_SIZE;
7097 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7098 rmem->dma_arr = ctx_pg->ctx_dma_arr;
1b9394e5 7099 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
08fe9d18
MC
7100 if (rmem->depth >= 1)
7101 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
98f04cf0
MC
7102 return bnxt_alloc_ring(bp, rmem);
7103}
7104
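/* Allocate host pages for one backing store region.  A two-level page table
 * is used when the region needs more than MAX_CTX_PAGES pages or a deeper
 * indirection level is requested.
 */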
08fe9d18
MC
7105static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7106 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
e9696ff3 7107 u8 depth, struct bnxt_mem_init *mem_init)
08fe9d18
MC
7108{
7109 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7110 int rc;
7111
7112 if (!mem_size)
bbf211b1 7113 return -EINVAL;
08fe9d18
MC
7114
7115 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7116 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7117 ctx_pg->nr_pages = 0;
7118 return -EINVAL;
7119 }
7120 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7121 int nr_tbls, i;
7122
7123 rmem->depth = 2;
7124 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7125 GFP_KERNEL);
7126 if (!ctx_pg->ctx_pg_tbl)
7127 return -ENOMEM;
7128 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7129 rmem->nr_pages = nr_tbls;
7130 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7131 if (rc)
7132 return rc;
7133 for (i = 0; i < nr_tbls; i++) {
7134 struct bnxt_ctx_pg_info *pg_tbl;
7135
7136 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7137 if (!pg_tbl)
7138 return -ENOMEM;
7139 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7140 rmem = &pg_tbl->ring_mem;
7141 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7142 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7143 rmem->depth = 1;
7144 rmem->nr_pages = MAX_CTX_PAGES;
e9696ff3 7145 rmem->mem_init = mem_init;
6ef982de
MC
7146 if (i == (nr_tbls - 1)) {
7147 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7148
7149 if (rem)
7150 rmem->nr_pages = rem;
7151 }
08fe9d18
MC
7152 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7153 if (rc)
7154 break;
7155 }
7156 } else {
7157 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7158 if (rmem->nr_pages > 1 || depth)
7159 rmem->depth = 1;
e9696ff3 7160 rmem->mem_init = mem_init;
08fe9d18
MC
7161 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7162 }
7163 return rc;
7164}
7165
7166static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7167 struct bnxt_ctx_pg_info *ctx_pg)
7168{
7169 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7170
7171 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7172 ctx_pg->ctx_pg_tbl) {
7173 int i, nr_tbls = rmem->nr_pages;
7174
7175 for (i = 0; i < nr_tbls; i++) {
7176 struct bnxt_ctx_pg_info *pg_tbl;
7177 struct bnxt_ring_mem_info *rmem2;
7178
7179 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7180 if (!pg_tbl)
7181 continue;
7182 rmem2 = &pg_tbl->ring_mem;
7183 bnxt_free_ring(bp, rmem2);
7184 ctx_pg->ctx_pg_arr[i] = NULL;
7185 kfree(pg_tbl);
7186 ctx_pg->ctx_pg_tbl[i] = NULL;
7187 }
7188 kfree(ctx_pg->ctx_pg_tbl);
7189 ctx_pg->ctx_pg_tbl = NULL;
7190 }
7191 bnxt_free_ring(bp, rmem);
7192 ctx_pg->nr_pages = 0;
7193}
7194
98f04cf0
MC
7195static void bnxt_free_ctx_mem(struct bnxt *bp)
7196{
7197 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7198 int i;
7199
7200 if (!ctx)
7201 return;
7202
7203 if (ctx->tqm_mem[0]) {
ac3158cb 7204 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
08fe9d18 7205 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
98f04cf0
MC
7206 kfree(ctx->tqm_mem[0]);
7207 ctx->tqm_mem[0] = NULL;
7208 }
7209
cf6daed0
MC
7210 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7211 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
08fe9d18
MC
7212 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7213 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7214 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7215 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7216 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
98f04cf0
MC
7217 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7218}
7219
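/* Size all backing store regions from the queried capabilities (reserving
 * extra QP/SRQ/MRAV/TIM entries when RoCE is supported), allocate them and
 * hand the final layout to the firmware.
 */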
7220static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7221{
7222 struct bnxt_ctx_pg_info *ctx_pg;
7223 struct bnxt_ctx_mem_info *ctx;
e9696ff3 7224 struct bnxt_mem_init *init;
1b9394e5 7225 u32 mem_size, ena, entries;
c7dd7ab4 7226 u32 entries_sp, min;
53579e37 7227 u32 num_mr, num_ah;
cf6daed0
MC
7228 u32 extra_srqs = 0;
7229 u32 extra_qps = 0;
7230 u8 pg_lvl = 1;
98f04cf0
MC
7231 int i, rc;
7232
7233 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7234 if (rc) {
7235 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7236 rc);
7237 return rc;
7238 }
7239 ctx = bp->ctx;
7240 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7241 return 0;
7242
d629522e 7243 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
cf6daed0
MC
7244 pg_lvl = 2;
7245 extra_qps = 65536;
7246 extra_srqs = 8192;
7247 }
7248
98f04cf0 7249 ctx_pg = &ctx->qp_mem;
cf6daed0
MC
7250 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7251 extra_qps;
be6d755f
EP
7252 if (ctx->qp_entry_size) {
7253 mem_size = ctx->qp_entry_size * ctx_pg->entries;
e9696ff3
MC
7254 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7255 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7256 if (rc)
7257 return rc;
7258 }
98f04cf0
MC
7259
7260 ctx_pg = &ctx->srq_mem;
cf6daed0 7261 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
be6d755f
EP
7262 if (ctx->srq_entry_size) {
7263 mem_size = ctx->srq_entry_size * ctx_pg->entries;
e9696ff3
MC
7264 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7265 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7266 if (rc)
7267 return rc;
7268 }
98f04cf0
MC
7269
7270 ctx_pg = &ctx->cq_mem;
cf6daed0 7271 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
be6d755f
EP
7272 if (ctx->cq_entry_size) {
7273 mem_size = ctx->cq_entry_size * ctx_pg->entries;
e9696ff3
MC
7274 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7275 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7276 if (rc)
7277 return rc;
7278 }
98f04cf0
MC
7279
7280 ctx_pg = &ctx->vnic_mem;
7281 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7282 ctx->vnic_max_ring_table_entries;
be6d755f
EP
7283 if (ctx->vnic_entry_size) {
7284 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
e9696ff3
MC
7285 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7286 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
be6d755f
EP
7287 if (rc)
7288 return rc;
7289 }
98f04cf0
MC
7290
7291 ctx_pg = &ctx->stat_mem;
7292 ctx_pg->entries = ctx->stat_max_entries;
be6d755f
EP
7293 if (ctx->stat_entry_size) {
7294 mem_size = ctx->stat_entry_size * ctx_pg->entries;
e9696ff3
MC
7295 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7296 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
be6d755f
EP
7297 if (rc)
7298 return rc;
7299 }
98f04cf0 7300
cf6daed0
MC
7301 ena = 0;
7302 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7303 goto skip_rdma;
7304
7305 ctx_pg = &ctx->mrav_mem;
53579e37
DS
7306 /* 128K extra is needed to accommodate static AH context
7307 * allocation by f/w.
7308 */
7309 num_mr = 1024 * 256;
7310 num_ah = 1024 * 128;
7311 ctx_pg->entries = num_mr + num_ah;
be6d755f
EP
7312 if (ctx->mrav_entry_size) {
7313 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
e9696ff3
MC
7314 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7315 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
be6d755f
EP
7316 if (rc)
7317 return rc;
7318 }
cf6daed0 7319 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
53579e37
DS
7320 if (ctx->mrav_num_entries_units)
7321 ctx_pg->entries =
7322 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7323 (num_ah / ctx->mrav_num_entries_units);
cf6daed0
MC
7324
7325 ctx_pg = &ctx->tim_mem;
7326 ctx_pg->entries = ctx->qp_mem.entries;
be6d755f
EP
7327 if (ctx->tim_entry_size) {
7328 mem_size = ctx->tim_entry_size * ctx_pg->entries;
e9696ff3 7329 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
be6d755f
EP
7330 if (rc)
7331 return rc;
7332 }
cf6daed0
MC
7333 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7334
7335skip_rdma:
c7dd7ab4
MC
7336 min = ctx->tqm_min_entries_per_ring;
7337 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7338 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7339 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
c12e1643 7340 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
98f04cf0 7341 entries = roundup(entries, ctx->tqm_entries_multiple);
c7dd7ab4 7342 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
ac3158cb 7343 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
98f04cf0 7344 ctx_pg = ctx->tqm_mem[i];
c7dd7ab4 7345 ctx_pg->entries = i ? entries : entries_sp;
be6d755f
EP
7346 if (ctx->tqm_entry_size) {
7347 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7348 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
e9696ff3 7349 NULL);
be6d755f
EP
7350 if (rc)
7351 return rc;
7352 }
1b9394e5 7353 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
98f04cf0 7354 }
1b9394e5
MC
7355 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7356 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
0b5b561c 7357 if (rc) {
1b9394e5
MC
7358 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7359 rc);
0b5b561c
MC
7360 return rc;
7361 }
7362 ctx->flags |= BNXT_CTX_FLAG_INITED;
98f04cf0
MC
7363 return 0;
7364}
7365
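/* Query the min/max resource limits (rings, ring groups, VNICs, L2/RSS
 * contexts, stat contexts) for this function.  When @all is false only the
 * TX scheduler input limit is updated.
 */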
db4723b3 7366int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
be0dd9c4 7367{
bbf33d1d
EP
7368 struct hwrm_func_resource_qcaps_output *resp;
7369 struct hwrm_func_resource_qcaps_input *req;
be0dd9c4
MC
7370 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7371 int rc;
7372
bbf33d1d
EP
7373 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7374 if (rc)
7375 return rc;
be0dd9c4 7376
bbf33d1d
EP
7377 req->fid = cpu_to_le16(0xffff);
7378 resp = hwrm_req_hold(bp, req);
7379 rc = hwrm_req_send_silent(bp, req);
d4f1420d 7380 if (rc)
be0dd9c4 7381 goto hwrm_func_resc_qcaps_exit;
be0dd9c4 7382
db4723b3
MC
7383 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7384 if (!all)
7385 goto hwrm_func_resc_qcaps_exit;
7386
be0dd9c4
MC
7387 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7388 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7389 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7390 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7391 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7392 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7393 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7394 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7395 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7396 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7397 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7398 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7399 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7400 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7401 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7402 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7403
9c1fabdf
MC
7404 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7405 u16 max_msix = le16_to_cpu(resp->max_msix);
7406
f7588cd8 7407 hw_resc->max_nqs = max_msix;
9c1fabdf
MC
7408 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7409 }
7410
4673d664
MC
7411 if (BNXT_PF(bp)) {
7412 struct bnxt_pf_info *pf = &bp->pf;
7413
7414 pf->vf_resv_strategy =
7415 le16_to_cpu(resp->vf_reservation_strategy);
bf82736d 7416 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
4673d664
MC
7417 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7418 }
be0dd9c4 7419hwrm_func_resc_qcaps_exit:
bbf33d1d 7420 hwrm_req_drop(bp, req);
be0dd9c4
MC
7421 return rc;
7422}
7423
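/* Discover how the PHC can be accessed for PTP timestamping, allocate
 * bp->ptp_cfg, record the reference clock register addresses and initialize
 * the PTP clock.  On failure any partial PTP state is torn down.
 */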
ae5c42f0
MC
7424static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7425{
bbf33d1d
EP
7426 struct hwrm_port_mac_ptp_qcfg_output *resp;
7427 struct hwrm_port_mac_ptp_qcfg_input *req;
ae5c42f0
MC
7428 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7429 u8 flags;
7430 int rc;
7431
7432 if (bp->hwrm_spec_code < 0x10801) {
7433 rc = -ENODEV;
7434 goto no_ptp;
7435 }
7436
bbf33d1d 7437 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
ae5c42f0
MC
7438 if (rc)
7439 goto no_ptp;
7440
bbf33d1d
EP
7441 req->port_id = cpu_to_le16(bp->pf.port_id);
7442 resp = hwrm_req_hold(bp, req);
7443 rc = hwrm_req_send(bp, req);
7444 if (rc)
7445 goto exit;
7446
ae5c42f0
MC
7447 flags = resp->flags;
7448 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7449 rc = -ENODEV;
bbf33d1d 7450 goto exit;
ae5c42f0
MC
7451 }
7452 if (!ptp) {
7453 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
bbf33d1d
EP
7454 if (!ptp) {
7455 rc = -ENOMEM;
7456 goto exit;
7457 }
ae5c42f0
MC
7458 ptp->bp = bp;
7459 bp->ptp_cfg = ptp;
7460 }
7461 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7462 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7463 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7464 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7465 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7466 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7467 } else {
7468 rc = -ENODEV;
bbf33d1d 7469 goto exit;
ae5c42f0 7470 }
a521c8a0 7471 rc = bnxt_ptp_init(bp);
bbf33d1d
EP
7472 if (rc)
7473 netdev_warn(bp->dev, "PTP initialization failed.\n");
7474exit:
7475 hwrm_req_drop(bp, req);
a521c8a0
MC
7476 if (!rc)
7477 return 0;
7478
ae5c42f0 7479no_ptp:
a521c8a0 7480 bnxt_ptp_clear(bp);
ae5c42f0
MC
7481 kfree(ptp);
7482 bp->ptp_cfg = NULL;
7483 return rc;
7484}
7485
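/* Query the function capabilities: RoCE and other feature flags, resource
 * maximums, default MAC address, WoL support and (on the PF) PTP support.
 */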
be0dd9c4 7486static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5 7487{
bbf33d1d
EP
7488 struct hwrm_func_qcaps_output *resp;
7489 struct hwrm_func_qcaps_input *req;
6a4f2947 7490 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
1da63ddd 7491 u32 flags, flags_ext;
bbf33d1d 7492 int rc;
c0c050c5 7493
bbf33d1d
EP
7494 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7495 if (rc)
7496 return rc;
c0c050c5 7497
bbf33d1d
EP
7498 req->fid = cpu_to_le16(0xffff);
7499 resp = hwrm_req_hold(bp, req);
7500 rc = hwrm_req_send(bp, req);
c0c050c5
MC
7501 if (rc)
7502 goto hwrm_func_qcaps_exit;
7503
6a4f2947
MC
7504 flags = le32_to_cpu(resp->flags);
7505 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
e4060d30 7506 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6a4f2947 7507 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
e4060d30 7508 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
55e4398d
VV
7509 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7510 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
0a3f4e4f
VV
7511 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7512 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
6154532f
VV
7513 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7514 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
07f83d72
MC
7515 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7516 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
4037eb71
VV
7517 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7518 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
1da63ddd
EP
7519 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7520 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7521
7522 flags_ext = le32_to_cpu(resp->flags_ext);
7523 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7524 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
caf3eedb
PC
7525 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7526 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
e4060d30 7527
7cc5a20e 7528 bp->tx_push_thresh = 0;
fed7edd1
MC
7529 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7530 BNXT_FW_MAJ(bp) > 217)
7cc5a20e
MC
7531 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7532
6a4f2947
MC
7533 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7534 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7535 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7536 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7537 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7538 if (!hw_resc->max_hw_ring_grps)
7539 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7540 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7541 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7542 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7543
c0c050c5
MC
7544 if (BNXT_PF(bp)) {
7545 struct bnxt_pf_info *pf = &bp->pf;
7546
7547 pf->fw_fid = le16_to_cpu(resp->fid);
7548 pf->port_id = le16_to_cpu(resp->port_id);
11f15ed3 7549 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
c0c050c5
MC
7550 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7551 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7552 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7553 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7554 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7555 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7556 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7557 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
ba642ab7 7558 bp->flags &= ~BNXT_FLAG_WOL_CAP;
6a4f2947 7559 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
c1ef146a 7560 bp->flags |= BNXT_FLAG_WOL_CAP;
de5bf194 7561 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
ae5c42f0 7562 __bnxt_hwrm_ptp_qcfg(bp);
de5bf194 7563 } else {
a521c8a0 7564 bnxt_ptp_clear(bp);
de5bf194
MC
7565 kfree(bp->ptp_cfg);
7566 bp->ptp_cfg = NULL;
7567 }
c0c050c5 7568 } else {
379a80a1 7569#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
7570 struct bnxt_vf_info *vf = &bp->vf;
7571
7572 vf->fw_fid = le16_to_cpu(resp->fid);
7cc5a20e 7573 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
379a80a1 7574#endif
c0c050c5
MC
7575 }
7576
c0c050c5 7577hwrm_func_qcaps_exit:
bbf33d1d 7578 hwrm_req_drop(bp, req);
c0c050c5
MC
7579 return rc;
7580}
7581
804fba4e
MC
7582static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7583
be0dd9c4
MC
7584static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7585{
7586 int rc;
7587
7588 rc = __bnxt_hwrm_func_qcaps(bp);
7589 if (rc)
7590 return rc;
804fba4e
MC
7591 rc = bnxt_hwrm_queue_qportcfg(bp);
7592 if (rc) {
7593 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7594 return rc;
7595 }
be0dd9c4 7596 if (bp->hwrm_spec_code >= 0x10803) {
98f04cf0
MC
7597 rc = bnxt_alloc_ctx_mem(bp);
7598 if (rc)
7599 return rc;
db4723b3 7600 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
be0dd9c4 7601 if (!rc)
97381a18 7602 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
be0dd9c4
MC
7603 }
7604 return 0;
7605}
7606
e969ae5b
MC
7607static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7608{
e969ae5b 7609 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
bbf33d1d 7610 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
e969ae5b 7611 u32 flags;
bbf33d1d 7612 int rc;
e969ae5b
MC
7613
7614 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7615 return 0;
7616
bbf33d1d
EP
7617 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7618 if (rc)
7619 return rc;
e969ae5b 7620
bbf33d1d
EP
7621 resp = hwrm_req_hold(bp, req);
7622 rc = hwrm_req_send(bp, req);
e969ae5b
MC
7623 if (rc)
7624 goto hwrm_cfa_adv_qcaps_exit;
7625
7626 flags = le32_to_cpu(resp->flags);
7627 if (flags &
41136ab3
MC
7628 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7629 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
e969ae5b
MC
7630
7631hwrm_cfa_adv_qcaps_exit:
bbf33d1d 7632 hwrm_req_drop(bp, req);
e969ae5b
MC
7633 return rc;
7634}
7635
3e9ec2bb
EP
7636static int __bnxt_alloc_fw_health(struct bnxt *bp)
7637{
7638 if (bp->fw_health)
7639 return 0;
7640
7641 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7642 if (!bp->fw_health)
7643 return -ENOMEM;
7644
7645 return 0;
7646}
7647
7648static int bnxt_alloc_fw_health(struct bnxt *bp)
7649{
7650 int rc;
7651
7652 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7653 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7654 return 0;
7655
7656 rc = __bnxt_alloc_fw_health(bp);
7657 if (rc) {
7658 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7659 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7660 return rc;
7661 }
7662
7663 return 0;
7664}
7665
ba02629f
EP
7666static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7667{
7668 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7669 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7670 BNXT_FW_HEALTH_WIN_MAP_OFF);
7671}
7672
80a9641f
PC
7673bool bnxt_is_fw_healthy(struct bnxt *bp)
7674{
7675 if (bp->fw_health && bp->fw_health->status_reliable) {
7676 u32 fw_status;
7677
7678 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7679 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7680 return false;
7681 }
7682
7683 return true;
7684}
7685
43a440c4
MC
7686static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7687{
7688 struct bnxt_fw_health *fw_health = bp->fw_health;
7689 u32 reg_type;
7690
7691 if (!fw_health || !fw_health->status_reliable)
7692 return;
7693
7694 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7695 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7696 fw_health->status_reliable = false;
7697}
7698
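/* Locate the firmware health status register, either from the hcomm status
 * structure in GRC space or from a fixed location on P5 chips, and map it
 * through a BAR0 window so the driver can poll firmware health.
 */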
ba02629f
EP
7699static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7700{
7701 void __iomem *hs;
7702 u32 status_loc;
7703 u32 reg_type;
7704 u32 sig;
7705
43a440c4
MC
7706 if (bp->fw_health)
7707 bp->fw_health->status_reliable = false;
7708
ba02629f
EP
7709 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7710 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7711
7712 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7713 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
d1cbd165
MC
7714 if (!bp->chip_num) {
7715 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7716 bp->chip_num = readl(bp->bar0 +
7717 BNXT_FW_HEALTH_WIN_BASE +
7718 BNXT_GRC_REG_CHIP_NUM);
7719 }
43a440c4 7720 if (!BNXT_CHIP_P5(bp))
d1cbd165 7721 return;
43a440c4 7722
d1cbd165
MC
7723 status_loc = BNXT_GRC_REG_STATUS_P5 |
7724 BNXT_FW_HEALTH_REG_TYPE_BAR0;
7725 } else {
7726 status_loc = readl(hs + offsetof(struct hcomm_status,
7727 fw_status_loc));
ba02629f
EP
7728 }
7729
7730 if (__bnxt_alloc_fw_health(bp)) {
7731 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7732 return;
7733 }
7734
ba02629f
EP
7735 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7736 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7737 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7738 __bnxt_map_fw_health_reg(bp, status_loc);
7739 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7740 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7741 }
7742
7743 bp->fw_health->status_reliable = true;
7744}
7745
9ffbd677
MC
7746static int bnxt_map_fw_health_regs(struct bnxt *bp)
7747{
7748 struct bnxt_fw_health *fw_health = bp->fw_health;
7749 u32 reg_base = 0xffffffff;
7750 int i;
7751
43a440c4 7752 bp->fw_health->status_reliable = false;
9ffbd677
MC
7753 /* Only pre-map the monitoring GRC registers using window 3 */
7754 for (i = 0; i < 4; i++) {
7755 u32 reg = fw_health->regs[i];
7756
7757 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7758 continue;
7759 if (reg_base == 0xffffffff)
7760 reg_base = reg & BNXT_GRC_BASE_MASK;
7761 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7762 return -ERANGE;
ba02629f 7763 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9ffbd677 7764 }
43a440c4 7765 bp->fw_health->status_reliable = true;
9ffbd677
MC
7766 if (reg_base == 0xffffffff)
7767 return 0;
7768
ba02629f 7769 __bnxt_map_fw_health_reg(bp, reg_base);
9ffbd677
MC
7770 return 0;
7771}
7772
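/* Fetch the firmware error recovery parameters: polling and wait intervals,
 * the health/heartbeat/reset-counter registers and the reset register
 * sequence, then pre-map the GRC registers used for monitoring.
 */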
07f83d72
MC
7773static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7774{
07f83d72 7775 struct bnxt_fw_health *fw_health = bp->fw_health;
bbf33d1d
EP
7776 struct hwrm_error_recovery_qcfg_output *resp;
7777 struct hwrm_error_recovery_qcfg_input *req;
07f83d72
MC
7778 int rc, i;
7779
7780 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7781 return 0;
7782
bbf33d1d
EP
7783 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7784 if (rc)
7785 return rc;
7786
7787 resp = hwrm_req_hold(bp, req);
7788 rc = hwrm_req_send(bp, req);
07f83d72
MC
7789 if (rc)
7790 goto err_recovery_out;
07f83d72
MC
7791 fw_health->flags = le32_to_cpu(resp->flags);
7792 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7793 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7794 rc = -EINVAL;
7795 goto err_recovery_out;
7796 }
7797 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7798 fw_health->master_func_wait_dsecs =
7799 le32_to_cpu(resp->master_func_wait_period);
7800 fw_health->normal_func_wait_dsecs =
7801 le32_to_cpu(resp->normal_func_wait_period);
7802 fw_health->post_reset_wait_dsecs =
7803 le32_to_cpu(resp->master_func_wait_period_after_reset);
7804 fw_health->post_reset_max_wait_dsecs =
7805 le32_to_cpu(resp->max_bailout_time_after_reset);
7806 fw_health->regs[BNXT_FW_HEALTH_REG] =
7807 le32_to_cpu(resp->fw_health_status_reg);
7808 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7809 le32_to_cpu(resp->fw_heartbeat_reg);
7810 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7811 le32_to_cpu(resp->fw_reset_cnt_reg);
7812 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7813 le32_to_cpu(resp->reset_inprogress_reg);
7814 fw_health->fw_reset_inprog_reg_mask =
7815 le32_to_cpu(resp->reset_inprogress_reg_mask);
7816 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7817 if (fw_health->fw_reset_seq_cnt >= 16) {
7818 rc = -EINVAL;
7819 goto err_recovery_out;
7820 }
7821 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7822 fw_health->fw_reset_seq_regs[i] =
7823 le32_to_cpu(resp->reset_reg[i]);
7824 fw_health->fw_reset_seq_vals[i] =
7825 le32_to_cpu(resp->reset_reg_val[i]);
7826 fw_health->fw_reset_seq_delay_msec[i] =
7827 resp->delay_after_reset[i];
7828 }
7829err_recovery_out:
bbf33d1d 7830 hwrm_req_drop(bp, req);
9ffbd677
MC
7831 if (!rc)
7832 rc = bnxt_map_fw_health_regs(bp);
07f83d72
MC
7833 if (rc)
7834 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7835 return rc;
7836}
7837
c0c050c5
MC
7838static int bnxt_hwrm_func_reset(struct bnxt *bp)
7839{
bbf33d1d
EP
7840 struct hwrm_func_reset_input *req;
7841 int rc;
c0c050c5 7842
bbf33d1d
EP
7843 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
7844 if (rc)
7845 return rc;
c0c050c5 7846
bbf33d1d
EP
7847 req->enables = 0;
7848 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
7849 return hwrm_req_send(bp, req);
c0c050c5
MC
7850}
7851
4933f675
VV
7852static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7853{
7854 struct hwrm_nvm_get_dev_info_output nvm_info;
7855
7856 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7857 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7858 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7859 nvm_info.nvm_cfg_ver_upd);
7860}
7861
c0c050c5
MC
7862static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7863{
bbf33d1d
EP
7864 struct hwrm_queue_qportcfg_output *resp;
7865 struct hwrm_queue_qportcfg_input *req;
aabfc016
MC
7866 u8 i, j, *qptr;
7867 bool no_rdma;
bbf33d1d 7868 int rc = 0;
c0c050c5 7869
bbf33d1d
EP
7870 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
7871 if (rc)
7872 return rc;
c0c050c5 7873
bbf33d1d
EP
7874 resp = hwrm_req_hold(bp, req);
7875 rc = hwrm_req_send(bp, req);
c0c050c5
MC
7876 if (rc)
7877 goto qportcfg_exit;
7878
7879 if (!resp->max_configurable_queues) {
7880 rc = -EINVAL;
7881 goto qportcfg_exit;
7882 }
7883 bp->max_tc = resp->max_configurable_queues;
87c374de 7884 bp->max_lltc = resp->max_configurable_lossless_queues;
c0c050c5
MC
7885 if (bp->max_tc > BNXT_MAX_QUEUE)
7886 bp->max_tc = BNXT_MAX_QUEUE;
7887
aabfc016
MC
7888 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7889 qptr = &resp->queue_id0;
7890 for (i = 0, j = 0; i < bp->max_tc; i++) {
98f04cf0
MC
7891 bp->q_info[j].queue_id = *qptr;
7892 bp->q_ids[i] = *qptr++;
aabfc016
MC
7893 bp->q_info[j].queue_profile = *qptr++;
7894 bp->tc_to_qidx[j] = j;
7895 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7896 (no_rdma && BNXT_PF(bp)))
7897 j++;
7898 }
98f04cf0 7899 bp->max_q = bp->max_tc;
aabfc016
MC
7900 bp->max_tc = max_t(u8, j, 1);
7901
441cabbb
MC
7902 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7903 bp->max_tc = 1;
7904
87c374de
MC
7905 if (bp->max_lltc > bp->max_tc)
7906 bp->max_lltc = bp->max_tc;
7907
c0c050c5 7908qportcfg_exit:
bbf33d1d 7909 hwrm_req_drop(bp, req);
c0c050c5
MC
7910 return rc;
7911}
7912
7b370ad7 7913static int bnxt_hwrm_poll(struct bnxt *bp)
c0c050c5 7914{
bbf33d1d 7915 struct hwrm_ver_get_input *req;
ba642ab7 7916 int rc;
c0c050c5 7917
bbf33d1d
EP
7918 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7919 if (rc)
7920 return rc;
ba642ab7 7921
bbf33d1d
EP
7922 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7923 req->hwrm_intf_min = HWRM_VERSION_MINOR;
7924 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7925
7926 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
7927 rc = hwrm_req_send(bp, req);
ba642ab7
MC
7928 return rc;
7929}
7930
7931static int bnxt_hwrm_ver_get(struct bnxt *bp)
7932{
bbf33d1d
EP
7933 struct hwrm_ver_get_output *resp;
7934 struct hwrm_ver_get_input *req;
d0ad2ea2 7935 u16 fw_maj, fw_min, fw_bld, fw_rsv;
b7a444f0 7936 u32 dev_caps_cfg, hwrm_ver;
d0ad2ea2 7937 int rc, len;
ba642ab7 7938
bbf33d1d
EP
7939 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7940 if (rc)
7941 return rc;
7942
7943 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
ba642ab7 7944 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bbf33d1d
EP
7945 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7946 req->hwrm_intf_min = HWRM_VERSION_MINOR;
7947 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7b370ad7 7948
bbf33d1d
EP
7949 resp = hwrm_req_hold(bp, req);
7950 rc = hwrm_req_send(bp, req);
c0c050c5
MC
7951 if (rc)
7952 goto hwrm_ver_get_exit;
7953
7954 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7955
894aa69a
MC
7956 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7957 resp->hwrm_intf_min_8b << 8 |
7958 resp->hwrm_intf_upd_8b;
7959 if (resp->hwrm_intf_maj_8b < 1) {
c193554e 7960 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
894aa69a
MC
7961 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7962 resp->hwrm_intf_upd_8b);
c193554e 7963 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5 7964 }
b7a444f0
VV
7965
7966 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7967 HWRM_VERSION_UPDATE;
7968
7969 if (bp->hwrm_spec_code > hwrm_ver)
7970 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7971 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7972 HWRM_VERSION_UPDATE);
7973 else
7974 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7975 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7976 resp->hwrm_intf_upd_8b);
7977
d0ad2ea2
MC
7978 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7979 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7980 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7981 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7982 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7983 len = FW_VER_STR_LEN;
7984 } else {
7985 fw_maj = resp->hwrm_fw_maj_8b;
7986 fw_min = resp->hwrm_fw_min_8b;
7987 fw_bld = resp->hwrm_fw_bld_8b;
7988 fw_rsv = resp->hwrm_fw_rsvd_8b;
7989 len = BC_HWRM_STR_LEN;
7990 }
7991 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7992 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7993 fw_rsv);
c0c050c5 7994
691aa620
VV
7995 if (strlen(resp->active_pkg_name)) {
7996 int fw_ver_len = strlen(bp->fw_ver_str);
7997
7998 snprintf(bp->fw_ver_str + fw_ver_len,
7999 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8000 resp->active_pkg_name);
8001 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8002 }
8003
ff4fe81d
MC
8004 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8005 if (!bp->hwrm_cmd_timeout)
8006 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8007
1dfddc41 8008 if (resp->hwrm_intf_maj_8b >= 1) {
e6ef2699 8009 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
1dfddc41
MC
8010 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8011 }
8012 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8013 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
e6ef2699 8014
659c805c 8015 bp->chip_num = le16_to_cpu(resp->chip_num);
5313845f 8016 bp->chip_rev = resp->chip_rev;
3e8060fa
PS
8017 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8018 !resp->chip_metal)
8019 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
659c805c 8020
e605db80
DK
8021 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8022 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8023 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
97381a18 8024 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
e605db80 8025
760b6d33
VD
8026 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8027 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8028
abd43a13
VD
8029 if (dev_caps_cfg &
8030 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8031 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8032
2a516444
MC
8033 if (dev_caps_cfg &
8034 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8035 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8036
e969ae5b
MC
8037 if (dev_caps_cfg &
8038 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8039 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8040
c0c050c5 8041hwrm_ver_get_exit:
bbf33d1d 8042 hwrm_req_drop(bp, req);
c0c050c5
MC
8043 return rc;
8044}
8045
5ac67d8b
RS
8046int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8047{
bbf33d1d 8048 struct hwrm_fw_set_time_input *req;
7dfaa7bc
AB
8049 struct tm tm;
8050 time64_t now = ktime_get_real_seconds();
bbf33d1d 8051 int rc;
5ac67d8b 8052
ca2c39e2
MC
8053 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8054 bp->hwrm_spec_code < 0x10400)
5ac67d8b
RS
8055 return -EOPNOTSUPP;
8056
7dfaa7bc 8057 time64_to_tm(now, 0, &tm);
bbf33d1d
EP
8058 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8059 if (rc)
8060 return rc;
8061
8062 req->year = cpu_to_le16(1900 + tm.tm_year);
8063 req->month = 1 + tm.tm_mon;
8064 req->day = tm.tm_mday;
8065 req->hour = tm.tm_hour;
8066 req->minute = tm.tm_min;
8067 req->second = tm.tm_sec;
8068 return hwrm_req_send(bp, req);
5ac67d8b
RS
8069}
8070
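/* Fold a narrower hardware counter into the full 64-bit software counter,
 * accounting for wrap-around of the masked hardware value.
 */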
fea6b333
MC
8071static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8072{
8073 u64 sw_tmp;
8074
fa97f303 8075 hw &= mask;
fea6b333
MC
8076 sw_tmp = (*sw & ~mask) | hw;
8077 if (hw < (*sw & mask))
8078 sw_tmp += mask + 1;
8079 WRITE_ONCE(*sw, sw_tmp);
8080}
8081
8082static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8083 int count, bool ignore_zero)
8084{
8085 int i;
8086
8087 for (i = 0; i < count; i++) {
8088 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8089
8090 if (ignore_zero && !hw)
8091 continue;
8092
8093 if (masks[i] == -1ULL)
8094 sw_stats[i] = hw;
8095 else
8096 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8097 }
8098}
8099
8100static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8101{
8102 if (!stats->hw_stats)
8103 return;
8104
8105 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8106 stats->hw_masks, stats->len / 8, false);
8107}
8108
8109static void bnxt_accumulate_all_stats(struct bnxt *bp)
8110{
8111 struct bnxt_stats_mem *ring0_stats;
8112 bool ignore_zero = false;
8113 int i;
8114
8115 /* Chip bug. Counter intermittently becomes 0. */
8116 if (bp->flags & BNXT_FLAG_CHIP_P5)
8117 ignore_zero = true;
8118
8119 for (i = 0; i < bp->cp_nr_rings; i++) {
8120 struct bnxt_napi *bnapi = bp->bnapi[i];
8121 struct bnxt_cp_ring_info *cpr;
8122 struct bnxt_stats_mem *stats;
8123
8124 cpr = &bnapi->cp_ring;
8125 stats = &cpr->stats;
8126 if (!i)
8127 ring0_stats = stats;
8128 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8129 ring0_stats->hw_masks,
8130 ring0_stats->len / 8, ignore_zero);
8131 }
8132 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8133 struct bnxt_stats_mem *stats = &bp->port_stats;
8134 __le64 *hw_stats = stats->hw_stats;
8135 u64 *sw_stats = stats->sw_stats;
8136 u64 *masks = stats->hw_masks;
8137 int cnt;
8138
8139 cnt = sizeof(struct rx_port_stats) / 8;
8140 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8141
8142 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8143 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8144 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8145 cnt = sizeof(struct tx_port_stats) / 8;
8146 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8147 }
8148 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8149 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8150 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8151 }
8152}
8153
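/* DMA the basic RX and TX port statistics from firmware into the port_stats
 * buffer.  A nonzero @flags value requires extended HW stats support.
 */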
531d1d26 8154static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
3bdf56c4 8155{
bbf33d1d 8156 struct hwrm_port_qstats_input *req;
3bdf56c4 8157 struct bnxt_pf_info *pf = &bp->pf;
bbf33d1d 8158 int rc;
3bdf56c4
MC
8159
8160 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8161 return 0;
8162
531d1d26
MC
8163 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8164 return -EOPNOTSUPP;
8165
bbf33d1d
EP
8166 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8167 if (rc)
8168 return rc;
8169
8170 req->flags = flags;
8171 req->port_id = cpu_to_le16(pf->port_id);
8172 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
177a6cde 8173 BNXT_TX_PORT_STATS_BYTE_OFFSET);
bbf33d1d
EP
8174 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8175 return hwrm_req_send(bp, req);
3bdf56c4
MC
8176}
8177
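/* DMA the extended port statistics and, when @flags is zero, also refresh
 * the priority-to-CoS queue mapping used to report per-priority counters.
 */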
531d1d26 8178static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
00db3cba 8179{
bbf33d1d
EP
8180 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8181 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8182 struct hwrm_port_qstats_ext_output *resp_qs;
8183 struct hwrm_port_qstats_ext_input *req_qs;
00db3cba 8184 struct bnxt_pf_info *pf = &bp->pf;
ad361adf 8185 u32 tx_stat_size;
36e53349 8186 int rc;
00db3cba
VV
8187
8188 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8189 return 0;
8190
531d1d26
MC
8191 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8192 return -EOPNOTSUPP;
8193
bbf33d1d
EP
8194 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8195 if (rc)
8196 return rc;
8197
8198 req_qs->flags = flags;
8199 req_qs->port_id = cpu_to_le16(pf->port_id);
8200 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8201 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
177a6cde
MC
8202 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8203 sizeof(struct tx_port_stats_ext) : 0;
bbf33d1d
EP
8204 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8205 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8206 resp_qs = hwrm_req_hold(bp, req_qs);
8207 rc = hwrm_req_send(bp, req_qs);
36e53349 8208 if (!rc) {
bbf33d1d
EP
8209 bp->fw_rx_stats_ext_size =
8210 le16_to_cpu(resp_qs->rx_stat_size) / 8;
ad361adf 8211 bp->fw_tx_stats_ext_size = tx_stat_size ?
bbf33d1d 8212 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
36e53349
MC
8213 } else {
8214 bp->fw_rx_stats_ext_size = 0;
8215 bp->fw_tx_stats_ext_size = 0;
8216 }
bbf33d1d
EP
8217 hwrm_req_drop(bp, req_qs);
8218
531d1d26 8219 if (flags)
bbf33d1d 8220 return rc;
531d1d26 8221
e37fed79
MC
8222 if (bp->fw_tx_stats_ext_size <=
8223 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
e37fed79
MC
8224 bp->pri2cos_valid = 0;
8225 return rc;
8226 }
8227
bbf33d1d
EP
8228 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8229 if (rc)
8230 return rc;
e37fed79 8231
bbf33d1d
EP
8232 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8233
8234 resp_qc = hwrm_req_hold(bp, req_qc);
8235 rc = hwrm_req_send(bp, req_qc);
e37fed79 8236 if (!rc) {
e37fed79
MC
8237 u8 *pri2cos;
8238 int i, j;
8239
bbf33d1d 8240 pri2cos = &resp_qc->pri0_cos_queue_id;
e37fed79
MC
8241 for (i = 0; i < 8; i++) {
8242 u8 queue_id = pri2cos[i];
a24ec322 8243 u8 queue_idx;
e37fed79 8244
a24ec322
MC
8245 /* Per port queue IDs start from 0, 10, 20, etc */
8246 queue_idx = queue_id % 10;
8247 if (queue_idx > BNXT_MAX_QUEUE) {
8248 bp->pri2cos_valid = false;
bbf33d1d
EP
8249 hwrm_req_drop(bp, req_qc);
8250 return rc;
a24ec322 8251 }
e37fed79
MC
8252 for (j = 0; j < bp->max_q; j++) {
8253 if (bp->q_ids[j] == queue_id)
a24ec322 8254 bp->pri2cos_idx[i] = queue_idx;
e37fed79
MC
8255 }
8256 }
bbf33d1d 8257 bp->pri2cos_valid = true;
e37fed79 8258 }
bbf33d1d
EP
8259 hwrm_req_drop(bp, req_qc);
8260
36e53349 8261 return rc;
00db3cba
VV
8262}
8263
c0c050c5
MC
8264static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8265{
7ae9dc35
MC
8266 bnxt_hwrm_tunnel_dst_port_free(bp,
8267 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8268 bnxt_hwrm_tunnel_dst_port_free(bp,
8269 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
c0c050c5
MC
8270}
8271
8272static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8273{
8274 int rc, i;
8275 u32 tpa_flags = 0;
8276
8277 if (set_tpa)
8278 tpa_flags = bp->flags & BNXT_FLAG_TPA;
b340dc68 8279 else if (BNXT_NO_FW_ACCESS(bp))
b4fff207 8280 return 0;
c0c050c5
MC
8281 for (i = 0; i < bp->nr_vnics; i++) {
8282 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8283 if (rc) {
8284 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
23e12c89 8285 i, rc);
c0c050c5
MC
8286 return rc;
8287 }
8288 }
8289 return 0;
8290}
8291
8292static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8293{
8294 int i;
8295
8296 for (i = 0; i < bp->nr_vnics; i++)
8297 bnxt_hwrm_vnic_set_rss(bp, i, false);
8298}
8299
a46ecb11 8300static void bnxt_clear_vnic(struct bnxt *bp)
c0c050c5 8301{
a46ecb11
MC
8302 if (!bp->vnic_info)
8303 return;
8304
8305 bnxt_hwrm_clear_vnic_filter(bp);
8306 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
c0c050c5
MC
8307 		/* clear all RSS settings before freeing the vnic ctx */
8308 bnxt_hwrm_clear_vnic_rss(bp);
8309 bnxt_hwrm_vnic_ctx_free(bp);
c0c050c5 8310 }
a46ecb11
MC
8311 	/* before freeing the vnic, undo the vnic tpa settings */
8312 if (bp->flags & BNXT_FLAG_TPA)
8313 bnxt_set_tpa(bp, false);
8314 bnxt_hwrm_vnic_free(bp);
8315 if (bp->flags & BNXT_FLAG_CHIP_P5)
8316 bnxt_hwrm_vnic_ctx_free(bp);
8317}
8318
8319static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8320 bool irq_re_init)
8321{
8322 bnxt_clear_vnic(bp);
c0c050c5
MC
8323 bnxt_hwrm_ring_free(bp, close_path);
8324 bnxt_hwrm_ring_grp_free(bp);
8325 if (irq_re_init) {
8326 bnxt_hwrm_stat_ctx_free(bp);
8327 bnxt_hwrm_free_tunnel_ports(bp);
8328 }
8329}
8330
39d8ba2e
MC
8331static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8332{
bbf33d1d
EP
8333 struct hwrm_func_cfg_input *req;
8334 u8 evb_mode;
8335 int rc;
39d8ba2e 8336
39d8ba2e 8337 if (br_mode == BRIDGE_MODE_VEB)
bbf33d1d 8338 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
39d8ba2e 8339 else if (br_mode == BRIDGE_MODE_VEPA)
bbf33d1d 8340 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
39d8ba2e
MC
8341 else
8342 return -EINVAL;
bbf33d1d
EP
8343
8344 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8345 if (rc)
8346 return rc;
8347
8348 req->fid = cpu_to_le16(0xffff);
8349 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8350 req->evb_mode = evb_mode;
8351 return hwrm_req_send(bp, req);
39d8ba2e
MC
8352}
8353
c3480a60
MC
8354static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8355{
bbf33d1d
EP
8356 struct hwrm_func_cfg_input *req;
8357 int rc;
c3480a60
MC
8358
8359 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8360 return 0;
8361
bbf33d1d
EP
8362 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8363 if (rc)
8364 return rc;
8365
8366 req->fid = cpu_to_le16(0xffff);
8367 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8368 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
c3480a60 8369 if (size == 128)
bbf33d1d 8370 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
c3480a60 8371
bbf33d1d 8372 return hwrm_req_send(bp, req);
c3480a60
MC
8373}
8374
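/* Set up a VNIC on pre-P5 chips: allocate its RSS context(s), configure the
 * VNIC and ring group, enable RSS hashing and, with aggregation rings, HDS.
 */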
7b3af4f7 8375static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
c0c050c5 8376{
ae10ae74 8377 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
c0c050c5
MC
8378 int rc;
8379
ae10ae74
MC
8380 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8381 goto skip_rss_ctx;
8382
c0c050c5 8383 /* allocate context for vnic */
94ce9caa 8384 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
c0c050c5
MC
8385 if (rc) {
8386 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8387 vnic_id, rc);
8388 goto vnic_setup_err;
8389 }
8390 bp->rsscos_nr_ctxs++;
8391
94ce9caa
PS
8392 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8393 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8394 if (rc) {
8395 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8396 vnic_id, rc);
8397 goto vnic_setup_err;
8398 }
8399 bp->rsscos_nr_ctxs++;
8400 }
8401
ae10ae74 8402skip_rss_ctx:
c0c050c5
MC
8403 /* configure default vnic, ring grp */
8404 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8405 if (rc) {
8406 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8407 vnic_id, rc);
8408 goto vnic_setup_err;
8409 }
8410
8411 /* Enable RSS hashing on vnic */
8412 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8413 if (rc) {
8414 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8415 vnic_id, rc);
8416 goto vnic_setup_err;
8417 }
8418
8419 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8420 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8421 if (rc) {
8422 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8423 vnic_id, rc);
8424 }
8425 }
8426
8427vnic_setup_err:
8428 return rc;
8429}
8430
7b3af4f7
MC
8431static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8432{
8433 int rc, i, nr_ctxs;
8434
f9f6a3fb 8435 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
7b3af4f7
MC
8436 for (i = 0; i < nr_ctxs; i++) {
8437 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8438 if (rc) {
8439 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8440 vnic_id, i, rc);
8441 break;
8442 }
8443 bp->rsscos_nr_ctxs++;
8444 }
8445 if (i < nr_ctxs)
8446 return -ENOMEM;
8447
8448 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8449 if (rc) {
8450 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8451 vnic_id, rc);
8452 return rc;
8453 }
8454 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8455 if (rc) {
8456 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8457 vnic_id, rc);
8458 return rc;
8459 }
8460 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8461 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8462 if (rc) {
8463 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8464 vnic_id, rc);
8465 }
8466 }
8467 return rc;
8468}
8469
8470static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8471{
8472 if (bp->flags & BNXT_FLAG_CHIP_P5)
8473 return __bnxt_setup_vnic_p5(bp, vnic_id);
8474 else
8475 return __bnxt_setup_vnic(bp, vnic_id);
8476}
8477
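/* For accelerated RFS on non-P5 chips, allocate one extra VNIC per RX
 * ring (vnic_id i + 1 backed by ring i) so n-tuple filters can steer
 * flows to individual rings.  P5 chips do not use per-ring VNICs for
 * this, so the function is a no-op there.
 */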
c0c050c5
MC
8478static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8479{
8480#ifdef CONFIG_RFS_ACCEL
8481 int i, rc = 0;
8482
9b3d15e6
MC
8483 if (bp->flags & BNXT_FLAG_CHIP_P5)
8484 return 0;
8485
c0c050c5 8486 for (i = 0; i < bp->rx_nr_rings; i++) {
ae10ae74 8487 struct bnxt_vnic_info *vnic;
c0c050c5
MC
8488 u16 vnic_id = i + 1;
8489 u16 ring_id = i;
8490
8491 if (vnic_id >= bp->nr_vnics)
8492 break;
8493
ae10ae74
MC
8494 vnic = &bp->vnic_info[vnic_id];
8495 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8496 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8497 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
b81a90d3 8498 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
c0c050c5
MC
8499 if (rc) {
8500 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8501 vnic_id, rc);
8502 break;
8503 }
8504 rc = bnxt_setup_vnic(bp, vnic_id);
8505 if (rc)
8506 break;
8507 }
8508 return rc;
8509#else
8510 return 0;
8511#endif
8512}
8513
dd85fc0a 8514/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
17c71ac3
MC
8515static bool bnxt_promisc_ok(struct bnxt *bp)
8516{
8517#ifdef CONFIG_BNXT_SRIOV
dd85fc0a 8518 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
17c71ac3
MC
8519 return false;
8520#endif
8521 return true;
8522}
8523
dc52c6c7
PS
8524static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8525{
 8526 int rc = 0;
8527
8528 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8529 if (rc) {
8530 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8531 rc);
8532 return rc;
8533 }
8534
8535 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8536 if (rc) {
8537 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8538 rc);
8539 return rc;
8540 }
8541 return rc;
8542}
8543
b664f008 8544static int bnxt_cfg_rx_mode(struct bnxt *);
7d2837dd 8545static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
b664f008 8546
c0c050c5
MC
8547static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8548{
7d2837dd 8549 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
c0c050c5 8550 int rc = 0;
76595193 8551 unsigned int rx_nr_rings = bp->rx_nr_rings;
c0c050c5
MC
8552
8553 if (irq_re_init) {
8554 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8555 if (rc) {
8556 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8557 rc);
8558 goto err_out;
8559 }
8560 }
8561
8562 rc = bnxt_hwrm_ring_alloc(bp);
8563 if (rc) {
8564 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8565 goto err_out;
8566 }
8567
8568 rc = bnxt_hwrm_ring_grp_alloc(bp);
8569 if (rc) {
8570 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8571 goto err_out;
8572 }
8573
76595193
PS
8574 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8575 rx_nr_rings--;
8576
c0c050c5 8577 /* default vnic 0 */
76595193 8578 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
c0c050c5
MC
8579 if (rc) {
8580 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8581 goto err_out;
8582 }
8583
8584 rc = bnxt_setup_vnic(bp, 0);
8585 if (rc)
8586 goto err_out;
8587
8588 if (bp->flags & BNXT_FLAG_RFS) {
8589 rc = bnxt_alloc_rfs_vnics(bp);
8590 if (rc)
8591 goto err_out;
8592 }
8593
8594 if (bp->flags & BNXT_FLAG_TPA) {
8595 rc = bnxt_set_tpa(bp, true);
8596 if (rc)
8597 goto err_out;
8598 }
8599
8600 if (BNXT_VF(bp))
8601 bnxt_update_vf_mac(bp);
8602
8603 /* Filter for default vnic 0 */
8604 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8605 if (rc) {
8606 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8607 goto err_out;
8608 }
7d2837dd 8609 vnic->uc_filter_count = 1;
c0c050c5 8610
30e33848
MC
8611 vnic->rx_mask = 0;
8612 if (bp->dev->flags & IFF_BROADCAST)
8613 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5 8614
dd85fc0a 8615 if (bp->dev->flags & IFF_PROMISC)
7d2837dd
MC
8616 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8617
8618 if (bp->dev->flags & IFF_ALLMULTI) {
8619 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8620 vnic->mc_list_count = 0;
8621 } else {
8622 u32 mask = 0;
8623
8624 bnxt_mc_list_updated(bp, &mask);
8625 vnic->rx_mask |= mask;
8626 }
c0c050c5 8627
b664f008
MC
8628 rc = bnxt_cfg_rx_mode(bp);
8629 if (rc)
c0c050c5 8630 goto err_out;
c0c050c5
MC
8631
8632 rc = bnxt_hwrm_set_coal(bp);
8633 if (rc)
8634 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
dc52c6c7
PS
8635 rc);
8636
8637 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8638 rc = bnxt_setup_nitroa0_vnic(bp);
8639 if (rc)
8640 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8641 rc);
8642 }
c0c050c5 8643
cf6645f8
MC
8644 if (BNXT_VF(bp)) {
8645 bnxt_hwrm_func_qcfg(bp);
8646 netdev_update_features(bp->dev);
8647 }
8648
c0c050c5
MC
8649 return 0;
8650
8651err_out:
8652 bnxt_hwrm_resource_free(bp, 0, true);
8653
8654 return rc;
8655}
8656
8657static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8658{
8659 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8660 return 0;
8661}
8662
8663static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8664{
2247925f 8665 bnxt_init_cp_rings(bp);
c0c050c5
MC
8666 bnxt_init_rx_rings(bp);
8667 bnxt_init_tx_rings(bp);
8668 bnxt_init_ring_grps(bp, irq_re_init);
8669 bnxt_init_vnics(bp);
8670
8671 return bnxt_init_chip(bp, irq_re_init);
8672}
8673
c0c050c5
MC
8674static int bnxt_set_real_num_queues(struct bnxt *bp)
8675{
8676 int rc;
8677 struct net_device *dev = bp->dev;
8678
5f449249
MC
8679 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8680 bp->tx_nr_rings_xdp);
c0c050c5
MC
8681 if (rc)
8682 return rc;
8683
8684 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8685 if (rc)
8686 return rc;
8687
8688#ifdef CONFIG_RFS_ACCEL
45019a18 8689 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 8690 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
8691#endif
8692
8693 return rc;
8694}
8695
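/* Trim the requested RX/TX ring counts so they fit in @max completion
 * rings.  With shared completion rings each RX/TX pair uses one entry,
 * so both counts are simply clamped to @max.  Without sharing, RX + TX
 * must fit in @max, so the larger count is decremented until the sum
 * fits, e.g. rx = 8, tx = 8, max = 12 trims to rx = 6, tx = 6.
 */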
6e6c5a57
MC
8696static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8697 bool shared)
8698{
8699 int _rx = *rx, _tx = *tx;
8700
8701 if (shared) {
8702 *rx = min_t(int, _rx, max);
8703 *tx = min_t(int, _tx, max);
8704 } else {
8705 if (max < 2)
8706 return -ENOMEM;
8707
8708 while (_rx + _tx > max) {
8709 if (_rx > _tx && _rx > 1)
8710 _rx--;
8711 else if (_tx > 1)
8712 _tx--;
8713 }
8714 *rx = _rx;
8715 *tx = _tx;
8716 }
8717 return 0;
8718}
8719
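/* Name the per-ring MSI-X vectors and attach the MSI-X handler.  Each
 * completion ring's vector is named "<ifname>-TxRx-<n>" when rings are
 * shared, or "<ifname>-rx-<n>" / "<ifname>-tx-<n>" otherwise.  TC queue
 * offsets are also (re)programmed when multiple traffic classes exist.
 */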
7809592d
MC
8720static void bnxt_setup_msix(struct bnxt *bp)
8721{
8722 const int len = sizeof(bp->irq_tbl[0].name);
8723 struct net_device *dev = bp->dev;
8724 int tcs, i;
8725
8726 tcs = netdev_get_num_tc(dev);
18e4960c 8727 if (tcs) {
d1e7925e 8728 int i, off, count;
7809592d 8729
d1e7925e
MC
8730 for (i = 0; i < tcs; i++) {
8731 count = bp->tx_nr_rings_per_tc;
8732 off = i * count;
8733 netdev_set_tc_queue(dev, i, count, off);
7809592d
MC
8734 }
8735 }
8736
8737 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c 8738 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7809592d
MC
8739 char *attr;
8740
8741 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8742 attr = "TxRx";
8743 else if (i < bp->rx_nr_rings)
8744 attr = "rx";
8745 else
8746 attr = "tx";
8747
e5811b8c
MC
8748 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8749 attr, i);
8750 bp->irq_tbl[map_idx].handler = bnxt_msix;
7809592d
MC
8751 }
8752}
8753
8754static void bnxt_setup_inta(struct bnxt *bp)
8755{
8756 const int len = sizeof(bp->irq_tbl[0].name);
8757
8758 if (netdev_get_num_tc(bp->dev))
8759 netdev_reset_tc(bp->dev);
8760
8761 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8762 0);
8763 bp->irq_tbl[0].handler = bnxt_inta;
8764}
8765
20d7d1c5
EP
8766static int bnxt_init_int_mode(struct bnxt *bp);
8767
7809592d
MC
8768static int bnxt_setup_int_mode(struct bnxt *bp)
8769{
8770 int rc;
8771
20d7d1c5
EP
8772 if (!bp->irq_tbl) {
8773 rc = bnxt_init_int_mode(bp);
8774 if (rc || !bp->irq_tbl)
8775 return rc ?: -ENODEV;
8776 }
8777
7809592d
MC
8778 if (bp->flags & BNXT_FLAG_USING_MSIX)
8779 bnxt_setup_msix(bp);
8780 else
8781 bnxt_setup_inta(bp);
8782
8783 rc = bnxt_set_real_num_queues(bp);
8784 return rc;
8785}
8786
b7429954 8787#ifdef CONFIG_RFS_ACCEL
8079e8f1
MC
8788static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8789{
6a4f2947 8790 return bp->hw_resc.max_rsscos_ctxs;
8079e8f1
MC
8791}
8792
8793static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8794{
6a4f2947 8795 return bp->hw_resc.max_vnics;
8079e8f1 8796}
b7429954 8797#endif
8079e8f1 8798
e4060d30
MC
8799unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8800{
6a4f2947 8801 return bp->hw_resc.max_stat_ctxs;
e4060d30
MC
8802}
8803
8804unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8805{
6a4f2947 8806 return bp->hw_resc.max_cp_rings;
e4060d30
MC
8807}
8808
e916b081 8809static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
a588e458 8810{
c0b8cda0
MC
8811 unsigned int cp = bp->hw_resc.max_cp_rings;
8812
8813 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8814 cp -= bnxt_get_ulp_msix_num(bp);
8815
8816 return cp;
a588e458
MC
8817}
8818
ad95c27b 8819static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7809592d 8820{
6a4f2947
MC
8821 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8822
f7588cd8
MC
8823 if (bp->flags & BNXT_FLAG_CHIP_P5)
8824 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8825
6a4f2947 8826 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7809592d
MC
8827}
8828
30f52947 8829static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
33c2657e 8830{
6a4f2947 8831 bp->hw_resc.max_irqs = max_irqs;
33c2657e
MC
8832}
8833
e916b081
MC
8834unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8835{
8836 unsigned int cp;
8837
8838 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8839 if (bp->flags & BNXT_FLAG_CHIP_P5)
8840 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8841 else
8842 return cp - bp->cp_nr_rings;
8843}
8844
c027c6b4
VV
8845unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8846{
d77b1ad8 8847 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
c027c6b4
VV
8848}
8849
fbcfc8e4
MC
8850int bnxt_get_avail_msix(struct bnxt *bp, int num)
8851{
8852 int max_cp = bnxt_get_max_func_cp_rings(bp);
8853 int max_irq = bnxt_get_max_func_irqs(bp);
8854 int total_req = bp->cp_nr_rings + num;
8855 int max_idx, avail_msix;
8856
75720e63
MC
8857 max_idx = bp->total_irqs;
8858 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8859 max_idx = min_t(int, bp->total_irqs, max_cp);
fbcfc8e4 8860 avail_msix = max_idx - bp->cp_nr_rings;
f1ca94de 8861 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
fbcfc8e4
MC
8862 return avail_msix;
8863
8864 if (max_irq < total_req) {
8865 num = max_irq - bp->cp_nr_rings;
8866 if (num <= 0)
8867 return 0;
8868 }
8869 return num;
8870}
8871
08654eb2
MC
8872static int bnxt_get_num_msix(struct bnxt *bp)
8873{
f1ca94de 8874 if (!BNXT_NEW_RM(bp))
08654eb2
MC
8875 return bnxt_get_max_func_irqs(bp);
8876
c0b8cda0 8877 return bnxt_nq_rings_in_use(bp);
08654eb2
MC
8878}
8879
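/* Enable MSI-X: request the vector count computed by
 * bnxt_get_num_msix() (capped at the function's maximum IRQs), set
 * aside the vectors reserved for the RDMA/ULP driver, and trim the
 * RX/TX ring counts to fit the vectors the kernel actually granted.
 */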
7809592d 8880static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 8881{
fbcfc8e4 8882 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7809592d 8883 struct msix_entry *msix_ent;
c0c050c5 8884
08654eb2
MC
8885 total_vecs = bnxt_get_num_msix(bp);
8886 max = bnxt_get_max_func_irqs(bp);
8887 if (total_vecs > max)
8888 total_vecs = max;
8889
2773dfb2
MC
8890 if (!total_vecs)
8891 return 0;
8892
c0c050c5
MC
8893 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8894 if (!msix_ent)
8895 return -ENOMEM;
8896
8897 for (i = 0; i < total_vecs; i++) {
8898 msix_ent[i].entry = i;
8899 msix_ent[i].vector = 0;
8900 }
8901
01657bcd
MC
8902 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8903 min = 2;
8904
8905 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
fbcfc8e4
MC
8906 ulp_msix = bnxt_get_ulp_msix_num(bp);
8907 if (total_vecs < 0 || total_vecs < ulp_msix) {
c0c050c5
MC
8908 rc = -ENODEV;
8909 goto msix_setup_exit;
8910 }
8911
8912 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8913 if (bp->irq_tbl) {
7809592d
MC
8914 for (i = 0; i < total_vecs; i++)
8915 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 8916
7809592d 8917 bp->total_irqs = total_vecs;
c0c050c5 8918 /* Trim rings based upon num of vectors allocated */
6e6c5a57 8919 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
fbcfc8e4 8920 total_vecs - ulp_msix, min == 1);
6e6c5a57
MC
8921 if (rc)
8922 goto msix_setup_exit;
8923
7809592d
MC
8924 bp->cp_nr_rings = (min == 1) ?
8925 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8926 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 8927
c0c050c5
MC
8928 } else {
8929 rc = -ENOMEM;
8930 goto msix_setup_exit;
8931 }
8932 bp->flags |= BNXT_FLAG_USING_MSIX;
8933 kfree(msix_ent);
8934 return 0;
8935
8936msix_setup_exit:
7809592d
MC
8937 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8938 kfree(bp->irq_tbl);
8939 bp->irq_tbl = NULL;
c0c050c5
MC
8940 pci_disable_msix(bp->pdev);
8941 kfree(msix_ent);
8942 return rc;
8943}
8944
7809592d 8945static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 8946{
33dbcf60 8947 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
8948 if (!bp->irq_tbl)
8949 return -ENOMEM;
8950
8951 bp->total_irqs = 1;
c0c050c5
MC
8952 bp->rx_nr_rings = 1;
8953 bp->tx_nr_rings = 1;
8954 bp->cp_nr_rings = 1;
01657bcd 8955 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 8956 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 8957 return 0;
c0c050c5
MC
8958}
8959
7809592d 8960static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5 8961{
20d7d1c5 8962 int rc = -ENODEV;
c0c050c5
MC
8963
8964 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 8965 rc = bnxt_init_msix(bp);
c0c050c5 8966
1fa72e29 8967 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 8968 /* fallback to INTA */
7809592d 8969 rc = bnxt_init_inta(bp);
c0c050c5
MC
8970 }
8971 return rc;
8972}
8973
7809592d
MC
8974static void bnxt_clear_int_mode(struct bnxt *bp)
8975{
8976 if (bp->flags & BNXT_FLAG_USING_MSIX)
8977 pci_disable_msix(bp->pdev);
8978
8979 kfree(bp->irq_tbl);
8980 bp->irq_tbl = NULL;
8981 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8982}
8983
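/* Re-reserve rings with the firmware when the current reservation no
 * longer matches what the driver needs.  If the MSI-X vector count must
 * change under the new resource manager, interrupts are torn down first
 * (with the ULP driver's IRQs quiesced) and re-initialized once the new
 * reservation succeeds.
 */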
1b3f0b75 8984int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
674f50a5 8985{
674f50a5 8986 int tcs = netdev_get_num_tc(bp->dev);
1b3f0b75 8987 bool irq_cleared = false;
674f50a5
MC
8988 int rc;
8989
8990 if (!bnxt_need_reserve_rings(bp))
8991 return 0;
8992
1b3f0b75
MC
8993 if (irq_re_init && BNXT_NEW_RM(bp) &&
8994 bnxt_get_num_msix(bp) != bp->total_irqs) {
ec86f14e 8995 bnxt_ulp_irq_stop(bp);
674f50a5 8996 bnxt_clear_int_mode(bp);
1b3f0b75 8997 irq_cleared = true;
36d65be9
MC
8998 }
8999 rc = __bnxt_reserve_rings(bp);
1b3f0b75 9000 if (irq_cleared) {
36d65be9
MC
9001 if (!rc)
9002 rc = bnxt_init_int_mode(bp);
ec86f14e 9003 bnxt_ulp_irq_restart(bp, rc);
36d65be9
MC
9004 }
9005 if (rc) {
9006 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9007 return rc;
674f50a5
MC
9008 }
9009 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
9010 netdev_err(bp->dev, "tx ring reservation failure\n");
9011 netdev_reset_tc(bp->dev);
9012 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9013 return -ENOMEM;
9014 }
674f50a5
MC
9015 return 0;
9016}
9017
c0c050c5
MC
9018static void bnxt_free_irq(struct bnxt *bp)
9019{
9020 struct bnxt_irq *irq;
9021 int i;
9022
9023#ifdef CONFIG_RFS_ACCEL
9024 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9025 bp->dev->rx_cpu_rmap = NULL;
9026#endif
cb98526b 9027 if (!bp->irq_tbl || !bp->bnapi)
c0c050c5
MC
9028 return;
9029
9030 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
9031 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9032
9033 irq = &bp->irq_tbl[map_idx];
56f0fd80
VV
9034 if (irq->requested) {
9035 if (irq->have_cpumask) {
9036 irq_set_affinity_hint(irq->vector, NULL);
9037 free_cpumask_var(irq->cpu_mask);
9038 irq->have_cpumask = 0;
9039 }
c0c050c5 9040 free_irq(irq->vector, bp->bnapi[i]);
56f0fd80
VV
9041 }
9042
c0c050c5
MC
9043 irq->requested = 0;
9044 }
c0c050c5
MC
9045}
9046
9047static int bnxt_request_irq(struct bnxt *bp)
9048{
b81a90d3 9049 int i, j, rc = 0;
c0c050c5
MC
9050 unsigned long flags = 0;
9051#ifdef CONFIG_RFS_ACCEL
e5811b8c 9052 struct cpu_rmap *rmap;
c0c050c5
MC
9053#endif
9054
e5811b8c
MC
9055 rc = bnxt_setup_int_mode(bp);
9056 if (rc) {
9057 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9058 rc);
9059 return rc;
9060 }
9061#ifdef CONFIG_RFS_ACCEL
9062 rmap = bp->dev->rx_cpu_rmap;
9063#endif
c0c050c5
MC
9064 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9065 flags = IRQF_SHARED;
9066
b81a90d3 9067 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
9068 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9069 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9070
c0c050c5 9071#ifdef CONFIG_RFS_ACCEL
b81a90d3 9072 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
9073 rc = irq_cpu_rmap_add(rmap, irq->vector);
9074 if (rc)
9075 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
b81a90d3
MC
9076 j);
9077 j++;
c0c050c5
MC
9078 }
9079#endif
9080 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9081 bp->bnapi[i]);
9082 if (rc)
9083 break;
9084
9085 irq->requested = 1;
56f0fd80
VV
9086
9087 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9088 int numa_node = dev_to_node(&bp->pdev->dev);
9089
9090 irq->have_cpumask = 1;
9091 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9092 irq->cpu_mask);
9093 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9094 if (rc) {
9095 netdev_warn(bp->dev,
9096 "Set affinity failed, IRQ = %d\n",
9097 irq->vector);
9098 break;
9099 }
9100 }
c0c050c5
MC
9101 }
9102 return rc;
9103}
9104
9105static void bnxt_del_napi(struct bnxt *bp)
9106{
9107 int i;
9108
9109 if (!bp->bnapi)
9110 return;
9111
9112 for (i = 0; i < bp->cp_nr_rings; i++) {
9113 struct bnxt_napi *bnapi = bp->bnapi[i];
9114
5198d545 9115 __netif_napi_del(&bnapi->napi);
c0c050c5 9116 }
5198d545 9117 /* We called __netif_napi_del(), so we need
e5f6f564
ED
9118 * to respect an RCU grace period before freeing napi structures.
9119 */
9120 synchronize_net();
c0c050c5
MC
9121}
9122
9123static void bnxt_init_napi(struct bnxt *bp)
9124{
9125 int i;
10bbdaf5 9126 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
9127 struct bnxt_napi *bnapi;
9128
9129 if (bp->flags & BNXT_FLAG_USING_MSIX) {
0fcec985
MC
9130 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9131
9132 if (bp->flags & BNXT_FLAG_CHIP_P5)
9133 poll_fn = bnxt_poll_p5;
9134 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10bbdaf5
PS
9135 cp_nr_rings--;
9136 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5 9137 bnapi = bp->bnapi[i];
0fcec985 9138 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
c0c050c5 9139 }
10bbdaf5
PS
9140 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9141 bnapi = bp->bnapi[cp_nr_rings];
9142 netif_napi_add(bp->dev, &bnapi->napi,
9143 bnxt_poll_nitroa0, 64);
10bbdaf5 9144 }
c0c050c5
MC
9145 } else {
9146 bnapi = bp->bnapi[0];
9147 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
c0c050c5
MC
9148 }
9149}
9150
9151static void bnxt_disable_napi(struct bnxt *bp)
9152{
9153 int i;
9154
e340a5c4
MC
9155 if (!bp->bnapi ||
9156 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
c0c050c5
MC
9157 return;
9158
0bc0b97f
AG
9159 for (i = 0; i < bp->cp_nr_rings; i++) {
9160 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9161
01cca6b9 9162 napi_disable(&bp->bnapi[i]->napi);
0bc0b97f
AG
9163 if (bp->bnapi[i]->rx_ring)
9164 cancel_work_sync(&cpr->dim.work);
0bc0b97f 9165 }
c0c050c5
MC
9166}
9167
9168static void bnxt_enable_napi(struct bnxt *bp)
9169{
9170 int i;
9171
e340a5c4 9172 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
c0c050c5 9173 for (i = 0; i < bp->cp_nr_rings; i++) {
8a27d4b9
MC
9174 struct bnxt_napi *bnapi = bp->bnapi[i];
9175 struct bnxt_cp_ring_info *cpr;
9176
9177 cpr = &bnapi->cp_ring;
9178 if (bnapi->in_reset)
9179 cpr->sw_stats.rx.rx_resets++;
9180 bnapi->in_reset = false;
6a8788f2 9181
8a27d4b9 9182 if (bnapi->rx_ring) {
6a8788f2 9183 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
c002bd52 9184 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6a8788f2 9185 }
8a27d4b9 9186 napi_enable(&bnapi->napi);
c0c050c5
MC
9187 }
9188}
9189
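/* TX is quiesced in a strict order: mark every TX ring as closing, let
 * in-flight NAPI polls observe the new state via synchronize_net(),
 * drop the carrier (preventing spurious TX timeouts), and only then
 * disable the TX queues.  bnxt_tx_enable() reverses the sequence.
 */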
7df4ae9f 9190void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
9191{
9192 int i;
c0c050c5 9193 struct bnxt_tx_ring_info *txr;
c0c050c5 9194
b6ab4b01 9195 if (bp->tx_ring) {
c0c050c5 9196 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 9197 txr = &bp->tx_ring[i];
3c603136 9198 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
c0c050c5
MC
9199 }
9200 }
3c603136
JK
9201 /* Make sure napi polls see @dev_state change */
9202 synchronize_net();
132e0b65
EP
9203 /* Drop carrier first to prevent TX timeout */
9204 netif_carrier_off(bp->dev);
c0c050c5
MC
9205 /* Stop all TX queues */
9206 netif_tx_disable(bp->dev);
c0c050c5
MC
9207}
9208
7df4ae9f 9209void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
9210{
9211 int i;
c0c050c5 9212 struct bnxt_tx_ring_info *txr;
c0c050c5
MC
9213
9214 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 9215 txr = &bp->tx_ring[i];
3c603136 9216 WRITE_ONCE(txr->dev_state, 0);
c0c050c5 9217 }
3c603136
JK
9218 /* Make sure napi polls see @dev_state change */
9219 synchronize_net();
c0c050c5
MC
9220 netif_tx_wake_all_queues(bp->dev);
9221 if (bp->link_info.link_up)
9222 netif_carrier_on(bp->dev);
9223}
9224
2046e3c3
MC
9225static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9226{
9227 u8 active_fec = link_info->active_fec_sig_mode &
9228 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9229
9230 switch (active_fec) {
9231 default:
9232 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9233 return "None";
9234 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9235 return "Clause 74 BaseR";
9236 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9237 return "Clause 91 RS(528,514)";
9238 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9239 return "Clause 91 RS544_1XN";
9240 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9241 return "Clause 91 RS(544,514)";
9242 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9243 return "Clause 91 RS272_1XN";
9244 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9245 return "Clause 91 RS(272,257)";
9246 }
9247}
9248
c0c050c5
MC
9249static void bnxt_report_link(struct bnxt *bp)
9250{
9251 if (bp->link_info.link_up) {
1d2deb61 9252 const char *signal = "";
c0c050c5 9253 const char *flow_ctrl;
1d2deb61 9254 const char *duplex;
38a21b34
DK
9255 u32 speed;
9256 u16 fec;
c0c050c5
MC
9257
9258 netif_carrier_on(bp->dev);
8eddb3e7
MC
9259 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9260 if (speed == SPEED_UNKNOWN) {
9261 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9262 return;
9263 }
c0c050c5
MC
9264 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9265 duplex = "full";
9266 else
9267 duplex = "half";
9268 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9269 flow_ctrl = "ON - receive & transmit";
9270 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9271 flow_ctrl = "ON - transmit";
9272 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9273 flow_ctrl = "ON - receive";
9274 else
9275 flow_ctrl = "none";
1d2deb61
EP
9276 if (bp->link_info.phy_qcfg_resp.option_flags &
9277 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9278 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9279 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9280 switch (sig_mode) {
9281 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9282 signal = "(NRZ) ";
9283 break;
9284 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9285 signal = "(PAM4) ";
9286 break;
9287 default:
9288 break;
9289 }
9290 }
9291 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9292 speed, signal, duplex, flow_ctrl);
b0d28207 9293 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
170ce013
MC
9294 netdev_info(bp->dev, "EEE is %s\n",
9295 bp->eee.eee_active ? "active" :
9296 "not active");
e70c752f
MC
9297 fec = bp->link_info.fec_cfg;
9298 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
2046e3c3 9299 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
e70c752f 9300 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
2046e3c3 9301 bnxt_report_fec(&bp->link_info));
c0c050c5
MC
9302 } else {
9303 netif_carrier_off(bp->dev);
9304 netdev_err(bp->dev, "NIC Link is Down\n");
9305 }
9306}
9307
3128e811
MC
9308static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9309{
9310 if (!resp->supported_speeds_auto_mode &&
9311 !resp->supported_speeds_force_mode &&
9312 !resp->supported_pam4_speeds_auto_mode &&
9313 !resp->supported_pam4_speeds_force_mode)
9314 return true;
9315 return false;
9316}
9317
170ce013
MC
9318static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9319{
93ed8117 9320 struct bnxt_link_info *link_info = &bp->link_info;
bbf33d1d
EP
9321 struct hwrm_port_phy_qcaps_output *resp;
9322 struct hwrm_port_phy_qcaps_input *req;
9323 int rc = 0;
170ce013
MC
9324
9325 if (bp->hwrm_spec_code < 0x10201)
9326 return 0;
9327
bbf33d1d
EP
9328 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9329 if (rc)
9330 return rc;
170ce013 9331
bbf33d1d
EP
9332 resp = hwrm_req_hold(bp, req);
9333 rc = hwrm_req_send(bp, req);
170ce013
MC
9334 if (rc)
9335 goto hwrm_phy_qcaps_exit;
9336
b0d28207 9337 bp->phy_flags = resp->flags;
acb20054 9338 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
170ce013
MC
9339 struct ethtool_eee *eee = &bp->eee;
9340 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9341
170ce013
MC
9342 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9343 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9344 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9345 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9346 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9347 }
fea6b333 9348
3128e811
MC
9349 if (bp->hwrm_spec_code >= 0x10a01) {
9350 if (bnxt_phy_qcaps_no_speed(resp)) {
9351 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9352 netdev_warn(bp->dev, "Ethernet link disabled\n");
9353 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9354 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9355 netdev_info(bp->dev, "Ethernet link enabled\n");
9356 /* Phy re-enabled, reprobe the speeds */
9357 link_info->support_auto_speeds = 0;
9358 link_info->support_pam4_auto_speeds = 0;
9359 }
9360 }
520ad89a
MC
9361 if (resp->supported_speeds_auto_mode)
9362 link_info->support_auto_speeds =
9363 le16_to_cpu(resp->supported_speeds_auto_mode);
d058426e
EP
9364 if (resp->supported_pam4_speeds_auto_mode)
9365 link_info->support_pam4_auto_speeds =
9366 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
170ce013 9367
d5430d31
MC
9368 bp->port_count = resp->port_cnt;
9369
170ce013 9370hwrm_phy_qcaps_exit:
bbf33d1d 9371 hwrm_req_drop(bp, req);
170ce013
MC
9372 return rc;
9373}
9374
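/* Return true if @advertising contains a speed bit that is absent from
 * @supported.  diff isolates the mismatched bits; OR-ing them back into
 * @supported only changes the value when a mismatch came from
 * @advertising.  For example, advertising = 0x6 and supported = 0x4
 * give diff = 0x2 and (0x4 | 0x2) != 0x4, so an advertised speed has
 * been dropped.
 */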
c916062a
EP
9375static bool bnxt_support_dropped(u16 advertising, u16 supported)
9376{
9377 u16 diff = advertising ^ supported;
9378
9379 return ((supported | diff) != supported);
9380}
9381
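/* Query PORT_PHY_QCFG and cache the result in bp->link_info: link
 * state, speed/duplex, pause, EEE and FEC status.  With
 * @chng_link_state set, a change in link state is reported via
 * bnxt_report_link().  Finally, if a previously advertised NRZ or PAM4
 * speed is no longer supported, the advertisement masks are corrected
 * and, when autoneg is enabled, pushed back to the firmware.
 */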
ccd6a9dc 9382int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
c0c050c5 9383{
c0c050c5 9384 struct bnxt_link_info *link_info = &bp->link_info;
bbf33d1d
EP
9385 struct hwrm_port_phy_qcfg_output *resp;
9386 struct hwrm_port_phy_qcfg_input *req;
c0c050c5 9387 u8 link_up = link_info->link_up;
d058426e 9388 bool support_changed = false;
bbf33d1d 9389 int rc;
c0c050c5 9390
bbf33d1d
EP
9391 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9392 if (rc)
9393 return rc;
c0c050c5 9394
bbf33d1d
EP
9395 resp = hwrm_req_hold(bp, req);
9396 rc = hwrm_req_send(bp, req);
c0c050c5 9397 if (rc) {
bbf33d1d 9398 hwrm_req_drop(bp, req);
c0c050c5
MC
9399 return rc;
9400 }
9401
9402 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9403 link_info->phy_link_status = resp->link;
acb20054
MC
9404 link_info->duplex = resp->duplex_cfg;
9405 if (bp->hwrm_spec_code >= 0x10800)
9406 link_info->duplex = resp->duplex_state;
c0c050c5
MC
9407 link_info->pause = resp->pause;
9408 link_info->auto_mode = resp->auto_mode;
9409 link_info->auto_pause_setting = resp->auto_pause;
3277360e 9410 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 9411 link_info->force_pause_setting = resp->force_pause;
acb20054 9412 link_info->duplex_setting = resp->duplex_cfg;
c0c050c5
MC
9413 if (link_info->phy_link_status == BNXT_LINK_LINK)
9414 link_info->link_speed = le16_to_cpu(resp->link_speed);
9415 else
9416 link_info->link_speed = 0;
9417 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
d058426e
EP
9418 link_info->force_pam4_link_speed =
9419 le16_to_cpu(resp->force_pam4_link_speed);
c0c050c5 9420 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
d058426e 9421 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
c0c050c5 9422 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
d058426e
EP
9423 link_info->auto_pam4_link_speeds =
9424 le16_to_cpu(resp->auto_pam4_link_speed_mask);
3277360e
MC
9425 link_info->lp_auto_link_speeds =
9426 le16_to_cpu(resp->link_partner_adv_speeds);
d058426e
EP
9427 link_info->lp_auto_pam4_link_speeds =
9428 resp->link_partner_pam4_adv_speeds;
c0c050c5
MC
9429 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9430 link_info->phy_ver[0] = resp->phy_maj;
9431 link_info->phy_ver[1] = resp->phy_min;
9432 link_info->phy_ver[2] = resp->phy_bld;
9433 link_info->media_type = resp->media_type;
03efbec0 9434 link_info->phy_type = resp->phy_type;
11f15ed3 9435 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
9436 link_info->phy_addr = resp->eee_config_phy_addr &
9437 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 9438 link_info->module_status = resp->module_status;
170ce013 9439
b0d28207 9440 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
170ce013
MC
9441 struct ethtool_eee *eee = &bp->eee;
9442 u16 fw_speeds;
9443
9444 eee->eee_active = 0;
9445 if (resp->eee_config_phy_addr &
9446 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9447 eee->eee_active = 1;
9448 fw_speeds = le16_to_cpu(
9449 resp->link_partner_adv_eee_link_speed_mask);
9450 eee->lp_advertised =
9451 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9452 }
9453
9454 /* Pull initial EEE config */
9455 if (!chng_link_state) {
9456 if (resp->eee_config_phy_addr &
9457 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9458 eee->eee_enabled = 1;
c0c050c5 9459
170ce013
MC
9460 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9461 eee->advertised =
9462 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9463
9464 if (resp->eee_config_phy_addr &
9465 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9466 __le32 tmr;
9467
9468 eee->tx_lpi_enabled = 1;
9469 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9470 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9471 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9472 }
9473 }
9474 }
e70c752f
MC
9475
9476 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8b277589 9477 if (bp->hwrm_spec_code >= 0x10504) {
e70c752f 9478 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8b277589
MC
9479 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9480 }
c0c050c5
MC
9481 /* TODO: need to add more logic to report VF link */
9482 if (chng_link_state) {
9483 if (link_info->phy_link_status == BNXT_LINK_LINK)
9484 link_info->link_up = 1;
9485 else
9486 link_info->link_up = 0;
9487 if (link_up != link_info->link_up)
9488 bnxt_report_link(bp);
9489 } else {
 9490 /* always link down if not required to update link state */
9491 link_info->link_up = 0;
9492 }
bbf33d1d 9493 hwrm_req_drop(bp, req);
286ef9d6 9494
c7e457f4 9495 if (!BNXT_PHY_CFG_ABLE(bp))
dac04907
MC
9496 return 0;
9497
c916062a
EP
9498 /* Check if any advertised speeds are no longer supported. The caller
9499 * holds the link_lock mutex, so we can modify link_info settings.
9500 */
9501 if (bnxt_support_dropped(link_info->advertising,
9502 link_info->support_auto_speeds)) {
286ef9d6 9503 link_info->advertising = link_info->support_auto_speeds;
d058426e 9504 support_changed = true;
286ef9d6 9505 }
d058426e
EP
9506 if (bnxt_support_dropped(link_info->advertising_pam4,
9507 link_info->support_pam4_auto_speeds)) {
9508 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9509 support_changed = true;
9510 }
9511 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9512 bnxt_hwrm_set_link_setting(bp, true, false);
c0c050c5
MC
9513 return 0;
9514}
9515
10289bec
MC
9516static void bnxt_get_port_module_status(struct bnxt *bp)
9517{
9518 struct bnxt_link_info *link_info = &bp->link_info;
9519 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9520 u8 module_status;
9521
9522 if (bnxt_update_link(bp, true))
9523 return;
9524
9525 module_status = link_info->module_status;
9526 switch (module_status) {
9527 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9528 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9529 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9530 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9531 bp->pf.port_id);
9532 if (bp->hwrm_spec_code >= 0x10201) {
9533 netdev_warn(bp->dev, "Module part number %s\n",
9534 resp->phy_vendor_partnumber);
9535 }
9536 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9537 netdev_warn(bp->dev, "TX is disabled\n");
9538 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9539 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9540 }
9541}
9542
c0c050c5
MC
9543static void
9544bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9545{
9546 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
9547 if (bp->hwrm_spec_code >= 0x10201)
9548 req->auto_pause =
9549 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
9550 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9551 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9552 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 9553 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
9554 req->enables |=
9555 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9556 } else {
9557 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9558 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9559 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9560 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9561 req->enables |=
9562 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
9563 if (bp->hwrm_spec_code >= 0x10201) {
9564 req->auto_pause = req->force_pause;
9565 req->enables |= cpu_to_le32(
9566 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9567 }
c0c050c5
MC
9568 }
9569}
9570
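/* Fill in the speed portion of a PORT_PHY_CFG request.  With autoneg
 * enabled, the NRZ and PAM4 advertisement masks are supplied and
 * autoneg is restarted; otherwise a single forced speed is programmed,
 * using the PAM4 force field when PAM4 signalling was requested.  The
 * RESET_PHY flag makes the new settings take effect immediately.
 */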
d058426e 9571static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
c0c050c5 9572{
d058426e
EP
9573 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9574 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9575 if (bp->link_info.advertising) {
9576 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9577 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9578 }
9579 if (bp->link_info.advertising_pam4) {
9580 req->enables |=
9581 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9582 req->auto_link_pam4_speed_mask =
9583 cpu_to_le16(bp->link_info.advertising_pam4);
9584 }
c0c050c5 9585 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
d058426e 9586 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
c0c050c5 9587 } else {
c0c050c5 9588 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
d058426e
EP
9589 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9590 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9591 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9592 } else {
9593 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9594 }
c0c050c5
MC
9595 }
9596
c0c050c5
MC
9597 /* tell chimp that the setting takes effect immediately */
9598 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9599}
9600
9601int bnxt_hwrm_set_pause(struct bnxt *bp)
9602{
bbf33d1d 9603 struct hwrm_port_phy_cfg_input *req;
c0c050c5
MC
9604 int rc;
9605
bbf33d1d
EP
9606 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9607 if (rc)
9608 return rc;
9609
9610 bnxt_hwrm_set_pause_common(bp, req);
c0c050c5
MC
9611
9612 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9613 bp->link_info.force_link_chng)
bbf33d1d 9614 bnxt_hwrm_set_link_common(bp, req);
c0c050c5 9615
bbf33d1d 9616 rc = hwrm_req_send(bp, req);
c0c050c5
MC
9617 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
 9618 /* since changing the pause setting doesn't trigger any link
 9619 * change event, the driver needs to update the current pause
 9620 * result upon successful return of the phy_cfg command
9621 */
9622 bp->link_info.pause =
9623 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9624 bp->link_info.auto_pause_setting = 0;
9625 if (!bp->link_info.force_link_chng)
9626 bnxt_report_link(bp);
9627 }
9628 bp->link_info.force_link_chng = false;
c0c050c5
MC
9629 return rc;
9630}
9631
939f7f0c
MC
9632static void bnxt_hwrm_set_eee(struct bnxt *bp,
9633 struct hwrm_port_phy_cfg_input *req)
9634{
9635 struct ethtool_eee *eee = &bp->eee;
9636
9637 if (eee->eee_enabled) {
9638 u16 eee_speeds;
9639 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9640
9641 if (eee->tx_lpi_enabled)
9642 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9643 else
9644 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9645
9646 req->flags |= cpu_to_le32(flags);
9647 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9648 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9649 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9650 } else {
9651 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9652 }
9653}
9654
9655int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5 9656{
bbf33d1d
EP
9657 struct hwrm_port_phy_cfg_input *req;
9658 int rc;
9659
9660 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9661 if (rc)
9662 return rc;
c0c050c5 9663
c0c050c5 9664 if (set_pause)
bbf33d1d 9665 bnxt_hwrm_set_pause_common(bp, req);
c0c050c5 9666
bbf33d1d 9667 bnxt_hwrm_set_link_common(bp, req);
939f7f0c
MC
9668
9669 if (set_eee)
bbf33d1d
EP
9670 bnxt_hwrm_set_eee(bp, req);
9671 return hwrm_req_send(bp, req);
c0c050c5
MC
9672}
9673
33f7d55f
MC
9674static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9675{
bbf33d1d
EP
9676 struct hwrm_port_phy_cfg_input *req;
9677 int rc;
33f7d55f 9678
567b2abe 9679 if (!BNXT_SINGLE_PF(bp))
33f7d55f
MC
9680 return 0;
9681
d5ca9905
MC
9682 if (pci_num_vf(bp->pdev) &&
9683 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
33f7d55f
MC
9684 return 0;
9685
bbf33d1d
EP
9686 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9687 if (rc)
9688 return rc;
9689
9690 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9691 return hwrm_req_send(bp, req);
33f7d55f
MC
9692}
9693
ec5d31e3
MC
9694static int bnxt_fw_init_one(struct bnxt *bp);
9695
b187e4ba
EP
9696static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9697{
9698#ifdef CONFIG_TEE_BNXT_FW
9699 int rc = tee_bnxt_fw_load();
9700
9701 if (rc)
9702 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9703
9704 return rc;
9705#else
9706 netdev_err(bp->dev, "OP-TEE not supported\n");
9707 return -ENODEV;
9708#endif
9709}
9710
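/* Try to recover an unresponsive firmware.  The health status register
 * is polled (up to BNXT_FW_RETRY times) while the firmware is booting
 * or recovering; if it crashed with no master function left to reset
 * it, recovery is requested through OP-TEE instead.
 */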
9711static int bnxt_try_recover_fw(struct bnxt *bp)
9712{
9713 if (bp->fw_health && bp->fw_health->status_reliable) {
d1cbd165
MC
9714 int retry = 0, rc;
9715 u32 sts;
9716
d1cbd165 9717 do {
d1cbd165 9718 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7b370ad7 9719 rc = bnxt_hwrm_poll(bp);
17e1be34
MC
9720 if (!BNXT_FW_IS_BOOTING(sts) &&
9721 !BNXT_FW_IS_RECOVERING(sts))
d1cbd165
MC
9722 break;
9723 retry++;
9724 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
b187e4ba 9725
d1cbd165
MC
9726 if (!BNXT_FW_IS_HEALTHY(sts)) {
9727 netdev_err(bp->dev,
9728 "Firmware not responding, status: 0x%x\n",
9729 sts);
9730 rc = -ENODEV;
9731 }
b187e4ba
EP
9732 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9733 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9734 return bnxt_fw_reset_via_optee(bp);
9735 }
d1cbd165 9736 return rc;
b187e4ba
EP
9737 }
9738
9739 return -ENODEV;
9740}
9741
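/* Tell the firmware that the driver is taking the interface up or down
 * (FUNC_DRV_IF_CHANGE).  The response flags indicate whether the
 * firmware was reset or changed the resource configuration while the
 * interface was down; in that case the driver re-runs firmware init,
 * re-initializes the interrupt mode and clears its cached resource
 * reservations before continuing with the open.
 */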
25e1acd6
MC
9742static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9743{
bbf33d1d
EP
9744 struct hwrm_func_drv_if_change_output *resp;
9745 struct hwrm_func_drv_if_change_input *req;
20d7d1c5
EP
9746 bool fw_reset = !bp->irq_tbl;
9747 bool resc_reinit = false;
5d06eb5c 9748 int rc, retry = 0;
ec5d31e3 9749 u32 flags = 0;
25e1acd6
MC
9750
9751 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9752 return 0;
9753
bbf33d1d
EP
9754 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
9755 if (rc)
9756 return rc;
9757
25e1acd6 9758 if (up)
bbf33d1d
EP
9759 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9760 resp = hwrm_req_hold(bp, req);
9761
9762 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
5d06eb5c 9763 while (retry < BNXT_FW_IF_RETRY) {
bbf33d1d 9764 rc = hwrm_req_send(bp, req);
5d06eb5c
VV
9765 if (rc != -EAGAIN)
9766 break;
9767
9768 msleep(50);
9769 retry++;
9770 }
5d06eb5c 9771
bbf33d1d
EP
9772 if (rc == -EAGAIN) {
9773 hwrm_req_drop(bp, req);
5d06eb5c 9774 return rc;
bbf33d1d
EP
9775 } else if (!rc) {
9776 flags = le32_to_cpu(resp->flags);
9777 } else if (up) {
b187e4ba
EP
9778 rc = bnxt_try_recover_fw(bp);
9779 fw_reset = true;
9780 }
bbf33d1d 9781 hwrm_req_drop(bp, req);
ec5d31e3
MC
9782 if (rc)
9783 return rc;
25e1acd6 9784
43a440c4
MC
9785 if (!up) {
9786 bnxt_inv_fw_health_reg(bp);
ec5d31e3 9787 return 0;
43a440c4 9788 }
25e1acd6 9789
ec5d31e3
MC
9790 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9791 resc_reinit = true;
9792 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9793 fw_reset = true;
43a440c4
MC
9794 else if (bp->fw_health && !bp->fw_health->status_reliable)
9795 bnxt_try_map_fw_health_reg(bp);
ec5d31e3 9796
3bc7d4a3
MC
9797 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9798 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
20d7d1c5 9799 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
3bc7d4a3
MC
9800 return -ENODEV;
9801 }
ec5d31e3
MC
9802 if (resc_reinit || fw_reset) {
9803 if (fw_reset) {
2924ad95 9804 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
f3a6d206
VV
9805 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9806 bnxt_ulp_stop(bp);
325f85f3
MC
9807 bnxt_free_ctx_mem(bp);
9808 kfree(bp->ctx);
9809 bp->ctx = NULL;
843d699d 9810 bnxt_dcb_free(bp);
ec5d31e3
MC
9811 rc = bnxt_fw_init_one(bp);
9812 if (rc) {
2924ad95 9813 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
ec5d31e3
MC
9814 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9815 return rc;
9816 }
9817 bnxt_clear_int_mode(bp);
9818 rc = bnxt_init_int_mode(bp);
9819 if (rc) {
2924ad95 9820 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
ec5d31e3
MC
9821 netdev_err(bp->dev, "init int mode failed\n");
9822 return rc;
9823 }
ec5d31e3
MC
9824 }
9825 if (BNXT_NEW_RM(bp)) {
9826 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9827
9828 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
15a7deb8
SB
9829 if (rc)
9830 netdev_err(bp->dev, "resc_qcaps failed\n");
9831
ec5d31e3
MC
9832 hw_resc->resv_cp_rings = 0;
9833 hw_resc->resv_stat_ctxs = 0;
9834 hw_resc->resv_irqs = 0;
9835 hw_resc->resv_tx_rings = 0;
9836 hw_resc->resv_rx_rings = 0;
9837 hw_resc->resv_hw_ring_grps = 0;
9838 hw_resc->resv_vnics = 0;
9839 if (!fw_reset) {
9840 bp->tx_nr_rings = 0;
9841 bp->rx_nr_rings = 0;
9842 }
9843 }
25e1acd6 9844 }
15a7deb8 9845 return rc;
25e1acd6
MC
9846}
9847
5ad2cbee
MC
9848static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9849{
bbf33d1d
EP
9850 struct hwrm_port_led_qcaps_output *resp;
9851 struct hwrm_port_led_qcaps_input *req;
5ad2cbee
MC
9852 struct bnxt_pf_info *pf = &bp->pf;
9853 int rc;
9854
ba642ab7 9855 bp->num_leds = 0;
5ad2cbee
MC
9856 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9857 return 0;
9858
bbf33d1d
EP
9859 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
9860 if (rc)
9861 return rc;
9862
9863 req->port_id = cpu_to_le16(pf->port_id);
9864 resp = hwrm_req_hold(bp, req);
9865 rc = hwrm_req_send(bp, req);
5ad2cbee 9866 if (rc) {
bbf33d1d 9867 hwrm_req_drop(bp, req);
5ad2cbee
MC
9868 return rc;
9869 }
9870 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9871 int i;
9872
9873 bp->num_leds = resp->num_leds;
9874 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9875 bp->num_leds);
9876 for (i = 0; i < bp->num_leds; i++) {
9877 struct bnxt_led_info *led = &bp->leds[i];
9878 __le16 caps = led->led_state_caps;
9879
9880 if (!led->led_group_id ||
9881 !BNXT_LED_ALT_BLINK_CAP(caps)) {
9882 bp->num_leds = 0;
9883 break;
9884 }
9885 }
9886 }
bbf33d1d 9887 hwrm_req_drop(bp, req);
5ad2cbee
MC
9888 return 0;
9889}
9890
5282db6c
MC
9891int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9892{
bbf33d1d
EP
9893 struct hwrm_wol_filter_alloc_output *resp;
9894 struct hwrm_wol_filter_alloc_input *req;
5282db6c
MC
9895 int rc;
9896
bbf33d1d
EP
9897 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
9898 if (rc)
9899 return rc;
9900
9901 req->port_id = cpu_to_le16(bp->pf.port_id);
9902 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9903 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9904 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
9905
9906 resp = hwrm_req_hold(bp, req);
9907 rc = hwrm_req_send(bp, req);
5282db6c
MC
9908 if (!rc)
9909 bp->wol_filter_id = resp->wol_filter_id;
bbf33d1d 9910 hwrm_req_drop(bp, req);
5282db6c
MC
9911 return rc;
9912}
9913
9914int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9915{
bbf33d1d
EP
9916 struct hwrm_wol_filter_free_input *req;
9917 int rc;
5282db6c 9918
bbf33d1d
EP
9919 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
9920 if (rc)
9921 return rc;
9922
9923 req->port_id = cpu_to_le16(bp->pf.port_id);
9924 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9925 req->wol_filter_id = bp->wol_filter_id;
9926
9927 return hwrm_req_send(bp, req);
5282db6c
MC
9928}
9929
c1ef146a
MC
9930static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9931{
bbf33d1d
EP
9932 struct hwrm_wol_filter_qcfg_output *resp;
9933 struct hwrm_wol_filter_qcfg_input *req;
c1ef146a
MC
9934 u16 next_handle = 0;
9935 int rc;
9936
bbf33d1d
EP
9937 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
9938 if (rc)
9939 return rc;
9940
9941 req->port_id = cpu_to_le16(bp->pf.port_id);
9942 req->handle = cpu_to_le16(handle);
9943 resp = hwrm_req_hold(bp, req);
9944 rc = hwrm_req_send(bp, req);
c1ef146a
MC
9945 if (!rc) {
9946 next_handle = le16_to_cpu(resp->next_handle);
9947 if (next_handle != 0) {
9948 if (resp->wol_type ==
9949 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9950 bp->wol = 1;
9951 bp->wol_filter_id = resp->wol_filter_id;
9952 }
9953 }
9954 }
bbf33d1d 9955 hwrm_req_drop(bp, req);
c1ef146a
MC
9956 return next_handle;
9957}
9958
9959static void bnxt_get_wol_settings(struct bnxt *bp)
9960{
9961 u16 handle = 0;
9962
ba642ab7 9963 bp->wol = 0;
c1ef146a
MC
9964 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9965 return;
9966
9967 do {
9968 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9969 } while (handle && handle != 0xffff);
9970}
9971
cde49a42
VV
9972#ifdef CONFIG_BNXT_HWMON
9973static ssize_t bnxt_show_temp(struct device *dev,
9974 struct device_attribute *devattr, char *buf)
9975{
cde49a42 9976 struct hwrm_temp_monitor_query_output *resp;
bbf33d1d 9977 struct hwrm_temp_monitor_query_input *req;
cde49a42 9978 struct bnxt *bp = dev_get_drvdata(dev);
12cce90b 9979 u32 len = 0;
d69753fa 9980 int rc;
cde49a42 9981
bbf33d1d
EP
9982 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
9983 if (rc)
9984 return rc;
9985 resp = hwrm_req_hold(bp, req);
9986 rc = hwrm_req_send(bp, req);
d69753fa 9987 if (!rc)
12cce90b 9988 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
bbf33d1d 9989 hwrm_req_drop(bp, req);
27537929
DC
9990 if (rc)
9991 return rc;
9992 return len;
cde49a42
VV
9993}
9994static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9995
9996static struct attribute *bnxt_attrs[] = {
9997 &sensor_dev_attr_temp1_input.dev_attr.attr,
9998 NULL
9999};
10000ATTRIBUTE_GROUPS(bnxt);
10001
10002static void bnxt_hwmon_close(struct bnxt *bp)
10003{
10004 if (bp->hwmon_dev) {
10005 hwmon_device_unregister(bp->hwmon_dev);
10006 bp->hwmon_dev = NULL;
10007 }
10008}
10009
10010static void bnxt_hwmon_open(struct bnxt *bp)
10011{
bbf33d1d 10012 struct hwrm_temp_monitor_query_input *req;
cde49a42 10013 struct pci_dev *pdev = bp->pdev;
d69753fa
EP
10014 int rc;
10015
bbf33d1d
EP
10016 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10017 if (!rc)
10018 rc = hwrm_req_send_silent(bp, req);
d69753fa
EP
10019 if (rc == -EACCES || rc == -EOPNOTSUPP) {
10020 bnxt_hwmon_close(bp);
10021 return;
10022 }
cde49a42 10023
ba642ab7
MC
10024 if (bp->hwmon_dev)
10025 return;
10026
cde49a42
VV
10027 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10028 DRV_MODULE_NAME, bp,
10029 bnxt_groups);
10030 if (IS_ERR(bp->hwmon_dev)) {
10031 bp->hwmon_dev = NULL;
10032 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10033 }
10034}
10035#else
10036static void bnxt_hwmon_close(struct bnxt *bp)
10037{
10038}
10039
10040static void bnxt_hwmon_open(struct bnxt *bp)
10041{
10042}
10043#endif
10044
939f7f0c
MC
10045static bool bnxt_eee_config_ok(struct bnxt *bp)
10046{
10047 struct ethtool_eee *eee = &bp->eee;
10048 struct bnxt_link_info *link_info = &bp->link_info;
10049
b0d28207 10050 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
939f7f0c
MC
10051 return true;
10052
10053 if (eee->eee_enabled) {
10054 u32 advertising =
10055 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10056
10057 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10058 eee->eee_enabled = 0;
10059 return false;
10060 }
10061 if (eee->advertised & ~advertising) {
10062 eee->advertised = advertising & eee->supported;
10063 return false;
10064 }
10065 }
10066 return true;
10067}
10068
c0c050c5
MC
10069static int bnxt_update_phy_setting(struct bnxt *bp)
10070{
10071 int rc;
10072 bool update_link = false;
10073 bool update_pause = false;
939f7f0c 10074 bool update_eee = false;
c0c050c5
MC
10075 struct bnxt_link_info *link_info = &bp->link_info;
10076
10077 rc = bnxt_update_link(bp, true);
10078 if (rc) {
10079 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10080 rc);
10081 return rc;
10082 }
33dac24a
MC
10083 if (!BNXT_SINGLE_PF(bp))
10084 return 0;
10085
c0c050c5 10086 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
c9ee9516
MC
10087 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10088 link_info->req_flow_ctrl)
c0c050c5
MC
10089 update_pause = true;
10090 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10091 link_info->force_pause_setting != link_info->req_flow_ctrl)
10092 update_pause = true;
c0c050c5
MC
10093 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10094 if (BNXT_AUTO_MODE(link_info->auto_mode))
10095 update_link = true;
d058426e
EP
10096 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10097 link_info->req_link_speed != link_info->force_link_speed)
10098 update_link = true;
10099 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10100 link_info->req_link_speed != link_info->force_pam4_link_speed)
c0c050c5 10101 update_link = true;
de73018f
MC
10102 if (link_info->req_duplex != link_info->duplex_setting)
10103 update_link = true;
c0c050c5
MC
10104 } else {
10105 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10106 update_link = true;
d058426e
EP
10107 if (link_info->advertising != link_info->auto_link_speeds ||
10108 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
c0c050c5 10109 update_link = true;
c0c050c5
MC
10110 }
10111
16d663a6
MC
 10112 /* The last close may have shut down the link, so we need to call
10113 * PHY_CFG to bring it back up.
10114 */
83d8f5e9 10115 if (!bp->link_info.link_up)
16d663a6
MC
10116 update_link = true;
10117
939f7f0c
MC
10118 if (!bnxt_eee_config_ok(bp))
10119 update_eee = true;
10120
c0c050c5 10121 if (update_link)
939f7f0c 10122 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
c0c050c5
MC
10123 else if (update_pause)
10124 rc = bnxt_hwrm_set_pause(bp);
10125 if (rc) {
10126 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10127 rc);
10128 return rc;
10129 }
10130
10131 return rc;
10132}
10133
11809490
JH
10134/* Common routine to pre-map certain register block to different GRC window.
10135 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
 10136 * in the PF and 3 windows in the VF can be customized to map in different
10137 * register blocks.
10138 */
10139static void bnxt_preset_reg_win(struct bnxt *bp)
10140{
10141 if (BNXT_PF(bp)) {
10142 /* CAG registers map to GRC window #4 */
10143 writel(BNXT_CAG_REG_BASE,
10144 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10145 }
10146}
10147
47558acd
MC
10148static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10149
6882c36c
EP
10150static int bnxt_reinit_after_abort(struct bnxt *bp)
10151{
10152 int rc;
10153
10154 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10155 return -EBUSY;
10156
d20cd745
VV
10157 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10158 return -ENODEV;
10159
6882c36c
EP
10160 rc = bnxt_fw_init_one(bp);
10161 if (!rc) {
10162 bnxt_clear_int_mode(bp);
10163 rc = bnxt_init_int_mode(bp);
10164 if (!rc) {
10165 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10166 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10167 }
10168 }
10169 return rc;
10170}
10171
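/* Core of ndo_open: reserve rings, allocate descriptor memory, set up
 * NAPI and IRQs (when @irq_re_init), program the chip through
 * bnxt_init_nic(), then update the PHY settings (when @link_re_init),
 * re-sync the UDP tunnel ports, enable interrupts and TX, and start the
 * periodic timer.  Errors unwind through the open_err_* labels.
 */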
c0c050c5
MC
10172static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10173{
10174 int rc = 0;
10175
11809490 10176 bnxt_preset_reg_win(bp);
c0c050c5
MC
10177 netif_carrier_off(bp->dev);
10178 if (irq_re_init) {
47558acd
MC
10179 /* Reserve rings now if none were reserved at driver probe. */
10180 rc = bnxt_init_dflt_ring_mode(bp);
10181 if (rc) {
10182 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10183 return rc;
10184 }
c0c050c5 10185 }
1b3f0b75 10186 rc = bnxt_reserve_rings(bp, irq_re_init);
41e8d798
MC
10187 if (rc)
10188 return rc;
c0c050c5
MC
10189 if ((bp->flags & BNXT_FLAG_RFS) &&
10190 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10191 /* disable RFS if falling back to INTA */
10192 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10193 bp->flags &= ~BNXT_FLAG_RFS;
10194 }
10195
10196 rc = bnxt_alloc_mem(bp, irq_re_init);
10197 if (rc) {
10198 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10199 goto open_err_free_mem;
10200 }
10201
10202 if (irq_re_init) {
10203 bnxt_init_napi(bp);
10204 rc = bnxt_request_irq(bp);
10205 if (rc) {
10206 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
c58387ab 10207 goto open_err_irq;
c0c050c5
MC
10208 }
10209 }
10210
c0c050c5
MC
10211 rc = bnxt_init_nic(bp, irq_re_init);
10212 if (rc) {
10213 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
96ecdcc9 10214 goto open_err_irq;
c0c050c5
MC
10215 }
10216
96ecdcc9
JK
10217 bnxt_enable_napi(bp);
10218 bnxt_debug_dev_init(bp);
10219
c0c050c5 10220 if (link_re_init) {
e2dc9b6e 10221 mutex_lock(&bp->link_lock);
c0c050c5 10222 rc = bnxt_update_phy_setting(bp);
e2dc9b6e 10223 mutex_unlock(&bp->link_lock);
a1ef4a79 10224 if (rc) {
ba41d46f 10225 netdev_warn(bp->dev, "failed to update phy settings\n");
a1ef4a79
MC
10226 if (BNXT_SINGLE_PF(bp)) {
10227 bp->link_info.phy_retry = true;
10228 bp->link_info.phy_retry_expires =
10229 jiffies + 5 * HZ;
10230 }
10231 }
c0c050c5
MC
10232 }
10233
7cdd5fc3 10234 if (irq_re_init)
442a35a5 10235 udp_tunnel_nic_reset_ntf(bp->dev);
c0c050c5 10236
caefe526 10237 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
10238 bnxt_enable_int(bp);
10239 /* Enable TX queues */
10240 bnxt_tx_enable(bp);
10241 mod_timer(&bp->timer, jiffies + bp->current_interval);
10289bec 10242 /* Poll link status and check for SFP+ module status */
3c10ed49 10243 mutex_lock(&bp->link_lock);
10289bec 10244 bnxt_get_port_module_status(bp);
3c10ed49 10245 mutex_unlock(&bp->link_lock);
c0c050c5 10246
ee5c7fb3
SP
10247 /* VF-reps may need to be re-opened after the PF is re-opened */
10248 if (BNXT_PF(bp))
10249 bnxt_vf_reps_open(bp);
c0c050c5
MC
10250 return 0;
10251
c58387ab 10252open_err_irq:
c0c050c5
MC
10253 bnxt_del_napi(bp);
10254
10255open_err_free_mem:
10256 bnxt_free_skbs(bp);
10257 bnxt_free_irq(bp);
10258 bnxt_free_mem(bp, true);
10259 return rc;
10260}
10261
10262/* rtnl_lock held */
10263int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10264{
10265 int rc = 0;
10266
a1301f08
MC
10267 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10268 rc = -EIO;
10269 if (!rc)
10270 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
10271 if (rc) {
10272 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10273 dev_close(bp->dev);
10274 }
10275 return rc;
10276}
10277
f7dc1ea6
MC
10278/* rtnl_lock held, open the NIC halfway by allocating all resources, but
10279 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
10280 * self tests.
10281 */
10282int bnxt_half_open_nic(struct bnxt *bp)
10283{
10284 int rc = 0;
10285
11a39259
SK
10286 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10287 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10288 rc = -ENODEV;
10289 goto half_open_err;
10290 }
10291
f7dc1ea6
MC
10292 rc = bnxt_alloc_mem(bp, false);
10293 if (rc) {
10294 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10295 goto half_open_err;
10296 }
10297 rc = bnxt_init_nic(bp, false);
10298 if (rc) {
10299 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10300 goto half_open_err;
10301 }
10302 return 0;
10303
10304half_open_err:
10305 bnxt_free_skbs(bp);
10306 bnxt_free_mem(bp, false);
10307 dev_close(bp->dev);
10308 return rc;
10309}
10310
10311/* rtnl_lock held, this call can only be made after a previous successful
10312 * call to bnxt_half_open_nic().
10313 */
10314void bnxt_half_close_nic(struct bnxt *bp)
10315{
10316 bnxt_hwrm_resource_free(bp, false, false);
10317 bnxt_free_skbs(bp);
10318 bnxt_free_mem(bp, false);
10319}
10320
c16d4ee0
MC
10321static void bnxt_reenable_sriov(struct bnxt *bp)
10322{
10323 if (BNXT_PF(bp)) {
10324 struct bnxt_pf_info *pf = &bp->pf;
10325 int n = pf->active_vfs;
10326
10327 if (n)
10328 bnxt_cfg_hw_sriov(bp, &n, true);
10329 }
10330}
10331
c0c050c5
MC
10332static int bnxt_open(struct net_device *dev)
10333{
10334 struct bnxt *bp = netdev_priv(dev);
25e1acd6 10335 int rc;
c0c050c5 10336
ec5d31e3 10337 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
6882c36c
EP
10338 rc = bnxt_reinit_after_abort(bp);
10339 if (rc) {
10340 if (rc == -EBUSY)
10341 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10342 else
10343 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10344 return -ENODEV;
10345 }
ec5d31e3
MC
10346 }
10347
10348 rc = bnxt_hwrm_if_change(bp, true);
25e1acd6 10349 if (rc)
ec5d31e3 10350 return rc;
d7859afb 10351
ec5d31e3
MC
10352 rc = __bnxt_open_nic(bp, true, true);
10353 if (rc) {
25e1acd6 10354 bnxt_hwrm_if_change(bp, false);
ec5d31e3 10355 } else {
f3a6d206 10356 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
12de2ead 10357 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
f3a6d206 10358 bnxt_ulp_start(bp, 0);
12de2ead
MC
10359 bnxt_reenable_sriov(bp);
10360 }
ec5d31e3
MC
10361 }
10362 bnxt_hwmon_open(bp);
10363 }
cde49a42 10364
25e1acd6 10365 return rc;
c0c050c5
MC
10366}
10367
f9b76ebd
MC
10368static bool bnxt_drv_busy(struct bnxt *bp)
10369{
10370 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10371 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10372}
10373
b8875ca3
MC
10374static void bnxt_get_ring_stats(struct bnxt *bp,
10375 struct rtnl_link_stats64 *stats);
10376
86e953db
MC
10377static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10378 bool link_re_init)
c0c050c5 10379{
ee5c7fb3
SP
10380 /* Close the VF-reps before closing PF */
10381 if (BNXT_PF(bp))
10382 bnxt_vf_reps_close(bp);
86e953db 10383
c0c050c5
MC
10384	/* Change device state to avoid TX queue wake-ups */
10385 bnxt_tx_disable(bp);
10386
caefe526 10387 clear_bit(BNXT_STATE_OPEN, &bp->state);
4cebdcec 10388 smp_mb__after_atomic();
f9b76ebd 10389 while (bnxt_drv_busy(bp))
4cebdcec 10390 msleep(20);
c0c050c5 10391
9d8bc097 10392	/* Flush rings and disable interrupts */
c0c050c5
MC
10393 bnxt_shutdown_nic(bp, irq_re_init);
10394
10395 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10396
cabfb09d 10397 bnxt_debug_dev_exit(bp);
c0c050c5 10398 bnxt_disable_napi(bp);
c0c050c5
MC
10399 del_timer_sync(&bp->timer);
10400 bnxt_free_skbs(bp);
10401
b8875ca3 10402 /* Save ring stats before shutdown */
b8056e84 10403 if (bp->bnapi && irq_re_init)
b8875ca3 10404 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
c0c050c5
MC
10405 if (irq_re_init) {
10406 bnxt_free_irq(bp);
10407 bnxt_del_napi(bp);
10408 }
10409 bnxt_free_mem(bp, irq_re_init);
86e953db
MC
10410}
10411
10412int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10413{
10414 int rc = 0;
10415
3bc7d4a3
MC
10416 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10417 /* If we get here, it means firmware reset is in progress
10418 * while we are trying to close. We can safely proceed with
10419 * the close because we are holding rtnl_lock(). Some firmware
10420 * messages may fail as we proceed to close. We set the
10421 * ABORT_ERR flag here so that the FW reset thread will later
10422 * abort when it gets the rtnl_lock() and sees the flag.
10423 */
10424 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10425 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10426 }
10427
86e953db
MC
10428#ifdef CONFIG_BNXT_SRIOV
10429 if (bp->sriov_cfg) {
10430 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10431 !bp->sriov_cfg,
10432 BNXT_SRIOV_CFG_WAIT_TMO);
10433 if (rc)
10434 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10435 }
10436#endif
10437 __bnxt_close_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
10438 return rc;
10439}
10440
10441static int bnxt_close(struct net_device *dev)
10442{
10443 struct bnxt *bp = netdev_priv(dev);
10444
cde49a42 10445 bnxt_hwmon_close(bp);
c0c050c5 10446 bnxt_close_nic(bp, true, true);
33f7d55f 10447 bnxt_hwrm_shutdown_link(bp);
25e1acd6 10448 bnxt_hwrm_if_change(bp, false);
c0c050c5
MC
10449 return 0;
10450}
10451
0ca12be9
VV
10452static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10453 u16 *val)
10454{
bbf33d1d
EP
10455 struct hwrm_port_phy_mdio_read_output *resp;
10456 struct hwrm_port_phy_mdio_read_input *req;
0ca12be9
VV
10457 int rc;
10458
10459 if (bp->hwrm_spec_code < 0x10a00)
10460 return -EOPNOTSUPP;
10461
bbf33d1d
EP
10462 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10463 if (rc)
10464 return rc;
10465
10466 req->port_id = cpu_to_le16(bp->pf.port_id);
10467 req->phy_addr = phy_addr;
10468 req->reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 10469 if (mdio_phy_id_is_c45(phy_addr)) {
bbf33d1d
EP
10470 req->cl45_mdio = 1;
10471 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10472 req->dev_addr = mdio_phy_id_devad(phy_addr);
10473 req->reg_addr = cpu_to_le16(reg);
0ca12be9
VV
10474 }
10475
bbf33d1d
EP
10476 resp = hwrm_req_hold(bp, req);
10477 rc = hwrm_req_send(bp, req);
0ca12be9
VV
10478 if (!rc)
10479 *val = le16_to_cpu(resp->reg_data);
bbf33d1d 10480 hwrm_req_drop(bp, req);
0ca12be9
VV
10481 return rc;
10482}
10483
10484static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10485 u16 val)
10486{
bbf33d1d
EP
10487 struct hwrm_port_phy_mdio_write_input *req;
10488 int rc;
0ca12be9
VV
10489
10490 if (bp->hwrm_spec_code < 0x10a00)
10491 return -EOPNOTSUPP;
10492
bbf33d1d
EP
10493 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10494 if (rc)
10495 return rc;
10496
10497 req->port_id = cpu_to_le16(bp->pf.port_id);
10498 req->phy_addr = phy_addr;
10499 req->reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 10500 if (mdio_phy_id_is_c45(phy_addr)) {
bbf33d1d
EP
10501 req->cl45_mdio = 1;
10502 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10503 req->dev_addr = mdio_phy_id_devad(phy_addr);
10504 req->reg_addr = cpu_to_le16(reg);
0ca12be9 10505 }
bbf33d1d 10506 req->reg_data = cpu_to_le16(val);
0ca12be9 10507
bbf33d1d 10508 return hwrm_req_send(bp, req);
0ca12be9
VV
10509}
10510
c0c050c5
MC
10511/* rtnl_lock held */
10512static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10513{
0ca12be9
VV
10514 struct mii_ioctl_data *mdio = if_mii(ifr);
10515 struct bnxt *bp = netdev_priv(dev);
10516 int rc;
10517
c0c050c5
MC
10518 switch (cmd) {
10519 case SIOCGMIIPHY:
0ca12be9
VV
10520 mdio->phy_id = bp->link_info.phy_addr;
10521
df561f66 10522 fallthrough;
c0c050c5 10523 case SIOCGMIIREG: {
0ca12be9
VV
10524 u16 mii_regval = 0;
10525
c0c050c5
MC
10526 if (!netif_running(dev))
10527 return -EAGAIN;
10528
0ca12be9
VV
10529 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10530 &mii_regval);
10531 mdio->val_out = mii_regval;
10532 return rc;
c0c050c5
MC
10533 }
10534
10535 case SIOCSMIIREG:
10536 if (!netif_running(dev))
10537 return -EAGAIN;
10538
0ca12be9
VV
10539 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10540 mdio->val_in);
c0c050c5 10541
118612d5
MC
10542 case SIOCSHWTSTAMP:
10543 return bnxt_hwtstamp_set(dev, ifr);
10544
10545 case SIOCGHWTSTAMP:
10546 return bnxt_hwtstamp_get(dev, ifr);
10547
c0c050c5
MC
10548 default:
10549 /* do nothing */
10550 break;
10551 }
10552 return -EOPNOTSUPP;
10553}
10554
b8875ca3
MC
10555static void bnxt_get_ring_stats(struct bnxt *bp,
10556 struct rtnl_link_stats64 *stats)
c0c050c5 10557{
b8875ca3 10558 int i;
c0c050c5 10559
c0c050c5
MC
10560 for (i = 0; i < bp->cp_nr_rings; i++) {
10561 struct bnxt_napi *bnapi = bp->bnapi[i];
10562 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
a0c30621 10563 u64 *sw = cpr->stats.sw_stats;
c0c050c5 10564
a0c30621
MC
10565 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10566 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10567 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
c0c050c5 10568
a0c30621
MC
10569 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10570 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10571 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
c0c050c5 10572
a0c30621
MC
10573 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10574 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10575 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
c0c050c5 10576
a0c30621
MC
10577 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10578 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10579 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
c0c050c5
MC
10580
10581 stats->rx_missed_errors +=
a0c30621 10582 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
c0c050c5 10583
a0c30621 10584 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
c0c050c5 10585
a0c30621 10586 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
40bedf7c 10587
907fd4a2
JK
10588 stats->rx_dropped +=
10589 cpr->sw_stats.rx.rx_netpoll_discards +
10590 cpr->sw_stats.rx.rx_oom_discards;
c0c050c5 10591 }
b8875ca3
MC
10592}
10593
10594static void bnxt_add_prev_stats(struct bnxt *bp,
10595 struct rtnl_link_stats64 *stats)
10596{
10597 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10598
10599 stats->rx_packets += prev_stats->rx_packets;
10600 stats->tx_packets += prev_stats->tx_packets;
10601 stats->rx_bytes += prev_stats->rx_bytes;
10602 stats->tx_bytes += prev_stats->tx_bytes;
10603 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10604 stats->multicast += prev_stats->multicast;
40bedf7c 10605 stats->rx_dropped += prev_stats->rx_dropped;
b8875ca3
MC
10606 stats->tx_dropped += prev_stats->tx_dropped;
10607}
10608
10609static void
10610bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10611{
10612 struct bnxt *bp = netdev_priv(dev);
10613
10614 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10615 /* Make sure bnxt_close_nic() sees that we are reading stats before
10616 * we check the BNXT_STATE_OPEN flag.
10617 */
10618 smp_mb__after_atomic();
10619 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10620 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10621 *stats = bp->net_stats_prev;
10622 return;
10623 }
10624
10625 bnxt_get_ring_stats(bp, stats);
10626 bnxt_add_prev_stats(bp, stats);
c0c050c5 10627
9947f83f 10628 if (bp->flags & BNXT_FLAG_PORT_STATS) {
a0c30621
MC
10629 u64 *rx = bp->port_stats.sw_stats;
10630 u64 *tx = bp->port_stats.sw_stats +
10631 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10632
10633 stats->rx_crc_errors =
10634 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10635 stats->rx_frame_errors =
10636 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10637 stats->rx_length_errors =
10638 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10639 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10640 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10641 stats->rx_errors =
10642 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10643 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10644 stats->collisions =
10645 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10646 stats->tx_fifo_errors =
10647 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10648 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
9947f83f 10649 }
f9b76ebd 10650 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
c0c050c5
MC
10651}
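/* Editor's note: a minimal sketch of the OPEN/READ_STATS handshake used by
 * bnxt_get_stats64() above and __bnxt_close_nic(): the reader advertises
 * itself, issues a barrier, then tests OPEN; the close path clears OPEN,
 * issues a barrier, and polls bnxt_drv_busy() until readers drain. The
 * stand-ins below use C11 atomics instead of the kernel bitops/barriers and
 * are illustrative only, not driver code.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool state_open = true;
static atomic_bool reading_stats = false;

static bool sketch_get_stats(void)
{
	atomic_store(&reading_stats, true);
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb__after_atomic() */
	if (!atomic_load(&state_open)) {
		atomic_store(&reading_stats, false);
		return false;	/* device closing: report previously saved stats */
	}
	/* ... sum per-ring counters here ... */
	atomic_store(&reading_stats, false);
	return true;
}

static void sketch_close(void)
{
	atomic_store(&state_open, false);
	atomic_thread_fence(memory_order_seq_cst);
	while (atomic_load(&reading_stats))
		;	/* the driver sleeps 20 ms per poll instead of spinning */
	/* ... now safe to flush rings and free memory ... */
}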
10652
10653static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10654{
10655 struct net_device *dev = bp->dev;
10656 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10657 struct netdev_hw_addr *ha;
10658 u8 *haddr;
10659 int mc_count = 0;
10660 bool update = false;
10661 int off = 0;
10662
10663 netdev_for_each_mc_addr(ha, dev) {
10664 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10665 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10666 vnic->mc_list_count = 0;
10667 return false;
10668 }
10669 haddr = ha->addr;
10670 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10671 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10672 update = true;
10673 }
10674 off += ETH_ALEN;
10675 mc_count++;
10676 }
10677 if (mc_count)
10678 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10679
10680 if (mc_count != vnic->mc_list_count) {
10681 vnic->mc_list_count = mc_count;
10682 update = true;
10683 }
10684 return update;
10685}
10686
10687static bool bnxt_uc_list_updated(struct bnxt *bp)
10688{
10689 struct net_device *dev = bp->dev;
10690 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10691 struct netdev_hw_addr *ha;
10692 int off = 0;
10693
10694 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10695 return true;
10696
10697 netdev_for_each_uc_addr(ha, dev) {
10698 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10699 return true;
10700
10701 off += ETH_ALEN;
10702 }
10703 return false;
10704}
10705
10706static void bnxt_set_rx_mode(struct net_device *dev)
10707{
10708 struct bnxt *bp = netdev_priv(dev);
268d0895 10709 struct bnxt_vnic_info *vnic;
c0c050c5
MC
10710 bool mc_update = false;
10711 bool uc_update;
268d0895 10712 u32 mask;
c0c050c5 10713
268d0895 10714 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
10715 return;
10716
268d0895
MC
10717 vnic = &bp->vnic_info[0];
10718 mask = vnic->rx_mask;
c0c050c5
MC
10719 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10720 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
30e33848
MC
10721 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10722 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
c0c050c5 10723
dd85fc0a 10724 if (dev->flags & IFF_PROMISC)
c0c050c5
MC
10725 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10726
10727 uc_update = bnxt_uc_list_updated(bp);
10728
30e33848
MC
10729 if (dev->flags & IFF_BROADCAST)
10730 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5
MC
10731 if (dev->flags & IFF_ALLMULTI) {
10732 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10733 vnic->mc_list_count = 0;
10734 } else {
10735 mc_update = bnxt_mc_list_updated(bp, &mask);
10736 }
10737
10738 if (mask != vnic->rx_mask || uc_update || mc_update) {
10739 vnic->rx_mask = mask;
10740
10741 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
c213eae8 10742 bnxt_queue_sp_work(bp);
c0c050c5
MC
10743 }
10744}
10745
b664f008 10746static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
10747{
10748 struct net_device *dev = bp->dev;
10749 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
bbf33d1d 10750 struct hwrm_cfa_l2_filter_free_input *req;
c0c050c5
MC
10751 struct netdev_hw_addr *ha;
10752 int i, off = 0, rc;
10753 bool uc_update;
10754
10755 netif_addr_lock_bh(dev);
10756 uc_update = bnxt_uc_list_updated(bp);
10757 netif_addr_unlock_bh(dev);
10758
10759 if (!uc_update)
10760 goto skip_uc;
10761
bbf33d1d
EP
10762 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
10763 if (rc)
10764 return rc;
10765 hwrm_req_hold(bp, req);
c0c050c5 10766 for (i = 1; i < vnic->uc_filter_count; i++) {
bbf33d1d 10767 req->l2_filter_id = vnic->fw_l2_filter_id[i];
c0c050c5 10768
bbf33d1d 10769 rc = hwrm_req_send(bp, req);
c0c050c5 10770 }
bbf33d1d 10771 hwrm_req_drop(bp, req);
c0c050c5
MC
10772
10773 vnic->uc_filter_count = 1;
10774
10775 netif_addr_lock_bh(dev);
10776 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10777 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10778 } else {
10779 netdev_for_each_uc_addr(ha, dev) {
10780 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10781 off += ETH_ALEN;
10782 vnic->uc_filter_count++;
10783 }
10784 }
10785 netif_addr_unlock_bh(dev);
10786
10787 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10788 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10789 if (rc) {
10790 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10791 rc);
10792 vnic->uc_filter_count = i;
b664f008 10793 return rc;
c0c050c5
MC
10794 }
10795 }
10796
10797skip_uc:
dd85fc0a
EP
10798 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10799 !bnxt_promisc_ok(bp))
10800 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
c0c050c5 10801 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
b4e30e8e
MC
10802 if (rc && vnic->mc_list_count) {
10803 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10804 rc);
10805 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10806 vnic->mc_list_count = 0;
10807 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10808 }
c0c050c5 10809 if (rc)
b4e30e8e 10810 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
c0c050c5 10811 rc);
b664f008
MC
10812
10813 return rc;
c0c050c5
MC
10814}
10815
2773dfb2
MC
10816static bool bnxt_can_reserve_rings(struct bnxt *bp)
10817{
10818#ifdef CONFIG_BNXT_SRIOV
f1ca94de 10819 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
2773dfb2
MC
10820 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10821
10822		/* If no minimum rings were provisioned by the PF, don't
10823		 * reserve rings by default while the device is down.
10824 */
10825 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10826 return true;
10827
10828 if (!netif_running(bp->dev))
10829 return false;
10830 }
10831#endif
10832 return true;
10833}
10834
8079e8f1
MC
10835/* If the chip and firmware support RFS */
10836static bool bnxt_rfs_supported(struct bnxt *bp)
10837{
e969ae5b 10838 if (bp->flags & BNXT_FLAG_CHIP_P5) {
41136ab3 10839 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
e969ae5b 10840 return true;
41e8d798 10841 return false;
e969ae5b 10842 }
976e52b7
MC
10843 /* 212 firmware is broken for aRFS */
10844 if (BNXT_FW_MAJ(bp) == 212)
10845 return false;
8079e8f1
MC
10846 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10847 return true;
ae10ae74
MC
10848 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10849 return true;
8079e8f1
MC
10850 return false;
10851}
10852
10853/* If runtime conditions support RFS */
2bcfa6f6
MC
10854static bool bnxt_rfs_capable(struct bnxt *bp)
10855{
10856#ifdef CONFIG_RFS_ACCEL
8079e8f1 10857 int vnics, max_vnics, max_rss_ctxs;
2bcfa6f6 10858
41e8d798 10859 if (bp->flags & BNXT_FLAG_CHIP_P5)
ac33906c 10860 return bnxt_rfs_supported(bp);
2773dfb2 10861 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
2bcfa6f6
MC
10862 return false;
10863
10864 vnics = 1 + bp->rx_nr_rings;
8079e8f1
MC
10865 max_vnics = bnxt_get_max_func_vnics(bp);
10866 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
ae10ae74
MC
10867
10868 /* RSS contexts not a limiting factor */
10869 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10870 max_rss_ctxs = max_vnics;
8079e8f1 10871 if (vnics > max_vnics || vnics > max_rss_ctxs) {
6a1eef5b
MC
10872 if (bp->rx_nr_rings > 1)
10873 netdev_warn(bp->dev,
10874 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10875 min(max_rss_ctxs - 1, max_vnics - 1));
2bcfa6f6 10876 return false;
a2304909 10877 }
2bcfa6f6 10878
f1ca94de 10879 if (!BNXT_NEW_RM(bp))
6a1eef5b
MC
10880 return true;
10881
10882 if (vnics == bp->hw_resc.resv_vnics)
10883 return true;
10884
780baad4 10885 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
6a1eef5b
MC
10886 if (vnics <= bp->hw_resc.resv_vnics)
10887 return true;
10888
10889 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
780baad4 10890 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
6a1eef5b 10891 return false;
2bcfa6f6
MC
10892#else
10893 return false;
10894#endif
10895}
10896
c0c050c5
MC
10897static netdev_features_t bnxt_fix_features(struct net_device *dev,
10898 netdev_features_t features)
10899{
2bcfa6f6 10900 struct bnxt *bp = netdev_priv(dev);
c72cb303 10901 netdev_features_t vlan_features;
2bcfa6f6 10902
a2304909 10903 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
2bcfa6f6 10904 features &= ~NETIF_F_NTUPLE;
5a9f6b23 10905
1054aee8
MC
10906 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10907 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10908
10909 if (!(features & NETIF_F_GRO))
10910 features &= ~NETIF_F_GRO_HW;
10911
10912 if (features & NETIF_F_GRO_HW)
10913 features &= ~NETIF_F_LRO;
10914
5a9f6b23
MC
10915	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
10916 * turned on or off together.
10917 */
a196e96b
EP
10918 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10919 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10920 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10921 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
c72cb303 10922 else if (vlan_features)
a196e96b 10923 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
5a9f6b23 10924 }
cf6645f8 10925#ifdef CONFIG_BNXT_SRIOV
a196e96b
EP
10926 if (BNXT_VF(bp) && bp->vf.vlan)
10927 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
cf6645f8 10928#endif
c0c050c5
MC
10929 return features;
10930}
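/* Editor's note: the fixups above amount to a small dependency table; this
 * summary is derived directly from the code and added for quick reference:
 *
 *   NTUPLE requested but !bnxt_rfs_capable()   -> NTUPLE dropped
 *   BNXT_FLAG_NO_AGG_RINGS                     -> LRO and GRO_HW dropped
 *   GRO off                                    -> GRO_HW forced off
 *   GRO_HW on                                  -> LRO dropped (mutually exclusive)
 *   CTAG/STAG RX VLAN acceleration             -> all-or-nothing, toggled together
 *   VF with a PF-enforced VLAN                 -> RX VLAN acceleration dropped
 */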
10931
10932static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10933{
10934 struct bnxt *bp = netdev_priv(dev);
10935 u32 flags = bp->flags;
10936 u32 changes;
10937 int rc = 0;
10938 bool re_init = false;
10939 bool update_tpa = false;
10940
10941 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
1054aee8 10942 if (features & NETIF_F_GRO_HW)
c0c050c5 10943 flags |= BNXT_FLAG_GRO;
1054aee8 10944 else if (features & NETIF_F_LRO)
c0c050c5
MC
10945 flags |= BNXT_FLAG_LRO;
10946
bdbd1eb5
MC
10947 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10948 flags &= ~BNXT_FLAG_TPA;
10949
a196e96b 10950 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
c0c050c5
MC
10951 flags |= BNXT_FLAG_STRIP_VLAN;
10952
10953 if (features & NETIF_F_NTUPLE)
10954 flags |= BNXT_FLAG_RFS;
10955
10956 changes = flags ^ bp->flags;
10957 if (changes & BNXT_FLAG_TPA) {
10958 update_tpa = true;
10959 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
f45b7b78
MC
10960 (flags & BNXT_FLAG_TPA) == 0 ||
10961 (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
10962 re_init = true;
10963 }
10964
10965 if (changes & ~BNXT_FLAG_TPA)
10966 re_init = true;
10967
10968 if (flags != bp->flags) {
10969 u32 old_flags = bp->flags;
10970
2bcfa6f6 10971 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
f45b7b78 10972 bp->flags = flags;
c0c050c5
MC
10973 if (update_tpa)
10974 bnxt_set_ring_params(bp);
10975 return rc;
10976 }
10977
10978 if (re_init) {
10979 bnxt_close_nic(bp, false, false);
f45b7b78 10980 bp->flags = flags;
c0c050c5
MC
10981 if (update_tpa)
10982 bnxt_set_ring_params(bp);
10983
10984 return bnxt_open_nic(bp, false, false);
10985 }
10986 if (update_tpa) {
f45b7b78 10987 bp->flags = flags;
c0c050c5
MC
10988 rc = bnxt_set_tpa(bp,
10989 (flags & BNXT_FLAG_TPA) ?
10990 true : false);
10991 if (rc)
10992 bp->flags = old_flags;
10993 }
10994 }
10995 return rc;
10996}
10997
aa473d6c
MC
10998static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
10999 u8 **nextp)
11000{
11001 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
11002 int hdr_count = 0;
11003 u8 *nexthdr;
11004 int start;
11005
11006 /* Check that there are at most 2 IPv6 extension headers, no
11007 * fragment header, and each is <= 64 bytes.
11008 */
11009 start = nw_off + sizeof(*ip6h);
11010 nexthdr = &ip6h->nexthdr;
11011 while (ipv6_ext_hdr(*nexthdr)) {
11012 struct ipv6_opt_hdr *hp;
11013 int hdrlen;
11014
11015 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
11016 *nexthdr == NEXTHDR_FRAGMENT)
11017 return false;
11018 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
11019 skb_headlen(skb), NULL);
11020 if (!hp)
11021 return false;
11022 if (*nexthdr == NEXTHDR_AUTH)
11023 hdrlen = ipv6_authlen(hp);
11024 else
11025 hdrlen = ipv6_optlen(hp);
11026
11027 if (hdrlen > 64)
11028 return false;
11029 nexthdr = &hp->nexthdr;
11030 start += hdrlen;
11031 hdr_count++;
11032 }
11033 if (nextp) {
11034 /* Caller will check inner protocol */
11035 if (skb->encapsulation) {
11036 *nextp = nexthdr;
11037 return true;
11038 }
11039 *nextp = NULL;
11040 }
11041 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11042 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11043}
11044
11045/* For UDP, we can only handle 1 VXLAN port and 1 Geneve port. */
11046static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11047{
11048 struct udphdr *uh = udp_hdr(skb);
11049 __be16 udp_port = uh->dest;
11050
11051 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11052 return false;
11053 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11054 struct ethhdr *eh = inner_eth_hdr(skb);
11055
11056 switch (eh->h_proto) {
11057 case htons(ETH_P_IP):
11058 return true;
11059 case htons(ETH_P_IPV6):
11060 return bnxt_exthdr_check(bp, skb,
11061 skb_inner_network_offset(skb),
11062 NULL);
11063 }
11064 }
11065 return false;
11066}
11067
11068static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11069{
11070 switch (l4_proto) {
11071 case IPPROTO_UDP:
11072 return bnxt_udp_tunl_check(bp, skb);
11073 case IPPROTO_IPIP:
11074 return true;
11075 case IPPROTO_GRE: {
11076 switch (skb->inner_protocol) {
11077 default:
11078 return false;
11079 case htons(ETH_P_IP):
11080 return true;
11081 case htons(ETH_P_IPV6):
11082 fallthrough;
11083 }
11084 }
11085 case IPPROTO_IPV6:
11086 /* Check ext headers of inner ipv6 */
11087 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11088 NULL);
11089 }
11090 return false;
11091}
11092
1698d600
MC
11093static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11094 struct net_device *dev,
11095 netdev_features_t features)
11096{
aa473d6c
MC
11097 struct bnxt *bp = netdev_priv(dev);
11098 u8 *l4_proto;
1698d600
MC
11099
11100 features = vlan_features_check(skb, features);
1698d600
MC
11101 switch (vlan_get_protocol(skb)) {
11102 case htons(ETH_P_IP):
aa473d6c
MC
11103 if (!skb->encapsulation)
11104 return features;
11105 l4_proto = &ip_hdr(skb)->protocol;
11106 if (bnxt_tunl_check(bp, skb, *l4_proto))
11107 return features;
1698d600
MC
11108 break;
11109 case htons(ETH_P_IPV6):
aa473d6c
MC
11110 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11111 &l4_proto))
11112 break;
11113 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11114 return features;
1698d600 11115 break;
1698d600 11116 }
1698d600
MC
11117 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11118}
11119
b5d600b0
VV
11120int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11121 u32 *reg_buf)
11122{
bbf33d1d
EP
11123 struct hwrm_dbg_read_direct_output *resp;
11124 struct hwrm_dbg_read_direct_input *req;
b5d600b0
VV
11125 __le32 *dbg_reg_buf;
11126 dma_addr_t mapping;
11127 int rc, i;
11128
bbf33d1d
EP
11129 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11130 if (rc)
11131 return rc;
11132
11133 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11134 &mapping);
11135 if (!dbg_reg_buf) {
11136 rc = -ENOMEM;
11137 goto dbg_rd_reg_exit;
11138 }
11139
11140 req->host_dest_addr = cpu_to_le64(mapping);
11141
11142 resp = hwrm_req_hold(bp, req);
11143 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11144 req->read_len32 = cpu_to_le32(num_words);
11145
11146 rc = hwrm_req_send(bp, req);
b5d600b0
VV
11147 if (rc || resp->error_code) {
11148 rc = -EIO;
11149 goto dbg_rd_reg_exit;
11150 }
11151 for (i = 0; i < num_words; i++)
11152 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11153
11154dbg_rd_reg_exit:
bbf33d1d 11155 hwrm_req_drop(bp, req);
b5d600b0
VV
11156 return rc;
11157}
11158
ffd77621
MC
11159static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11160 u32 ring_id, u32 *prod, u32 *cons)
11161{
bbf33d1d
EP
11162 struct hwrm_dbg_ring_info_get_output *resp;
11163 struct hwrm_dbg_ring_info_get_input *req;
ffd77621
MC
11164 int rc;
11165
bbf33d1d
EP
11166 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11167 if (rc)
11168 return rc;
11169
11170 req->ring_type = ring_type;
11171 req->fw_ring_id = cpu_to_le32(ring_id);
11172 resp = hwrm_req_hold(bp, req);
11173 rc = hwrm_req_send(bp, req);
ffd77621
MC
11174 if (!rc) {
11175 *prod = le32_to_cpu(resp->producer_index);
11176 *cons = le32_to_cpu(resp->consumer_index);
11177 }
bbf33d1d 11178 hwrm_req_drop(bp, req);
ffd77621
MC
11179 return rc;
11180}
11181
9f554590
MC
11182static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11183{
b6ab4b01 11184 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9f554590
MC
11185 int i = bnapi->index;
11186
3b2b7d9d
MC
11187 if (!txr)
11188 return;
11189
9f554590
MC
11190 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11191 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11192 txr->tx_cons);
11193}
11194
11195static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11196{
b6ab4b01 11197 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9f554590
MC
11198 int i = bnapi->index;
11199
3b2b7d9d
MC
11200 if (!rxr)
11201 return;
11202
9f554590
MC
11203 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11204 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11205 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11206 rxr->rx_sw_agg_prod);
11207}
11208
11209static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11210{
11211 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11212 int i = bnapi->index;
11213
11214 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11215 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11216}
11217
c0c050c5
MC
11218static void bnxt_dbg_dump_states(struct bnxt *bp)
11219{
11220 int i;
11221 struct bnxt_napi *bnapi;
c0c050c5
MC
11222
11223 for (i = 0; i < bp->cp_nr_rings; i++) {
11224 bnapi = bp->bnapi[i];
c0c050c5 11225 if (netif_msg_drv(bp)) {
9f554590
MC
11226 bnxt_dump_tx_sw_state(bnapi);
11227 bnxt_dump_rx_sw_state(bnapi);
11228 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
11229 }
11230 }
11231}
11232
8fbf58e1
MC
11233static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11234{
11235 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
bbf33d1d 11236 struct hwrm_ring_reset_input *req;
8fbf58e1
MC
11237 struct bnxt_napi *bnapi = rxr->bnapi;
11238 struct bnxt_cp_ring_info *cpr;
11239 u16 cp_ring_id;
bbf33d1d
EP
11240 int rc;
11241
11242 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11243 if (rc)
11244 return rc;
8fbf58e1
MC
11245
11246 cpr = &bnapi->cp_ring;
11247 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
bbf33d1d
EP
11248 req->cmpl_ring = cpu_to_le16(cp_ring_id);
11249 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11250 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11251 return hwrm_req_send_silent(bp, req);
8fbf58e1
MC
11252}
11253
6988bd92 11254static void bnxt_reset_task(struct bnxt *bp, bool silent)
c0c050c5 11255{
6988bd92
MC
11256 if (!silent)
11257 bnxt_dbg_dump_states(bp);
028de140 11258 if (netif_running(bp->dev)) {
b386cd36
MC
11259 int rc;
11260
aa46dfff
VV
11261 if (silent) {
11262 bnxt_close_nic(bp, false, false);
11263 bnxt_open_nic(bp, false, false);
11264 } else {
b386cd36 11265 bnxt_ulp_stop(bp);
aa46dfff
VV
11266 bnxt_close_nic(bp, true, false);
11267 rc = bnxt_open_nic(bp, true, false);
11268 bnxt_ulp_start(bp, rc);
11269 }
028de140 11270 }
c0c050c5
MC
11271}
11272
0290bd29 11273static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
c0c050c5
MC
11274{
11275 struct bnxt *bp = netdev_priv(dev);
11276
11277 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11278 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 11279 bnxt_queue_sp_work(bp);
c0c050c5
MC
11280}
11281
acfb50e4
VV
11282static void bnxt_fw_health_check(struct bnxt *bp)
11283{
11284 struct bnxt_fw_health *fw_health = bp->fw_health;
11285 u32 val;
11286
0797c10d 11287 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
acfb50e4
VV
11288 return;
11289
1b2b9183
MC
11290 /* Make sure it is enabled before checking the tmr_counter. */
11291 smp_rmb();
acfb50e4
VV
11292 if (fw_health->tmr_counter) {
11293 fw_health->tmr_counter--;
11294 return;
11295 }
11296
11297 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11298 if (val == fw_health->last_fw_heartbeat)
11299 goto fw_reset;
11300
11301 fw_health->last_fw_heartbeat = val;
11302
11303 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11304 if (val != fw_health->last_fw_reset_cnt)
11305 goto fw_reset;
11306
11307 fw_health->tmr_counter = fw_health->tmr_multiplier;
11308 return;
11309
11310fw_reset:
11311 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11312 bnxt_queue_sp_work(bp);
11313}
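/* Editor's note: a hedged sketch of the heartbeat watchdog above. Once every
 * tmr_multiplier timer ticks the heartbeat register must have advanced and
 * the reset counter must be unchanged, otherwise a firmware reset is
 * scheduled. Struct and function names below are illustrative stand-ins (the
 * register values are passed in rather than read via bnxt_fw_health_readl()).
 */
struct hb_watch {
	unsigned int tmr_counter;	/* ticks remaining until the next check */
	unsigned int tmr_multiplier;	/* check once every N timer ticks */
	unsigned int last_heartbeat;
	unsigned int last_reset_cnt;
};

/* Returns nonzero when recovery should be scheduled. */
static int hb_check(struct hb_watch *w, unsigned int heartbeat,
		    unsigned int reset_cnt)
{
	if (w->tmr_counter) {		/* throttle the check */
		w->tmr_counter--;
		return 0;
	}
	if (heartbeat == w->last_heartbeat)
		return 1;		/* heartbeat stalled */
	w->last_heartbeat = heartbeat;
	if (reset_cnt != w->last_reset_cnt)
		return 1;		/* firmware reset itself unexpectedly */
	w->tmr_counter = w->tmr_multiplier;
	return 0;
}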
11314
e99e88a9 11315static void bnxt_timer(struct timer_list *t)
c0c050c5 11316{
e99e88a9 11317 struct bnxt *bp = from_timer(bp, t, timer);
c0c050c5
MC
11318 struct net_device *dev = bp->dev;
11319
e0009404 11320 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
11321 return;
11322
11323 if (atomic_read(&bp->intr_sem) != 0)
11324 goto bnxt_restart_timer;
11325
acfb50e4
VV
11326 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11327 bnxt_fw_health_check(bp);
11328
fea6b333 11329 if (bp->link_info.link_up && bp->stats_coal_ticks) {
3bdf56c4 11330 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
c213eae8 11331 bnxt_queue_sp_work(bp);
3bdf56c4 11332 }
5a84acbe
SP
11333
11334 if (bnxt_tc_flower_enabled(bp)) {
11335 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11336 bnxt_queue_sp_work(bp);
11337 }
a1ef4a79 11338
87d67f59
PC
11339#ifdef CONFIG_RFS_ACCEL
11340 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11341 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11342 bnxt_queue_sp_work(bp);
11343 }
11344#endif /*CONFIG_RFS_ACCEL*/
11345
a1ef4a79
MC
11346 if (bp->link_info.phy_retry) {
11347 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
acda6180 11348 bp->link_info.phy_retry = false;
a1ef4a79
MC
11349 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11350 } else {
11351 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11352 bnxt_queue_sp_work(bp);
11353 }
11354 }
ffd77621 11355
5313845f
MC
11356 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11357 netif_carrier_ok(dev)) {
ffd77621
MC
11358 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11359 bnxt_queue_sp_work(bp);
11360 }
c0c050c5
MC
11361bnxt_restart_timer:
11362 mod_timer(&bp->timer, jiffies + bp->current_interval);
11363}
11364
a551ee94 11365static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6988bd92 11366{
a551ee94
MC
11367 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11368 * set. If the device is being closed, bnxt_close() may be holding
6988bd92
MC
11369 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11370 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
11371 */
11372 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11373 rtnl_lock();
a551ee94
MC
11374}
11375
11376static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11377{
6988bd92
MC
11378 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11379 rtnl_unlock();
11380}
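/* Editor's note: the helpers above encode a lock-ordering rule: the service
 * task must clear BNXT_STATE_IN_SP_TASK *before* taking rtnl_lock(), because
 * bnxt_close() may already hold rtnl and wait for that bit to clear. A
 * minimal user-space sketch of the same rule, using a pthread mutex and a
 * C11 atomic flag as stand-ins (names are illustrative, not driver code):
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;	/* ~ rtnl_lock */
static atomic_bool in_sp_task = false;

static void sp_task_lock(void)
{
	/* Drop the busy flag first so a closer already holding big_lock can
	 * finish its wait loop; only then block on the lock ourselves.
	 */
	atomic_store(&in_sp_task, false);
	pthread_mutex_lock(&big_lock);
}

static void sp_task_unlock(void)
{
	atomic_store(&in_sp_task, true);	/* back inside the service task */
	pthread_mutex_unlock(&big_lock);
}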
11381
a551ee94
MC
11382/* Only called from bnxt_sp_task() */
11383static void bnxt_reset(struct bnxt *bp, bool silent)
11384{
11385 bnxt_rtnl_lock_sp(bp);
11386 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11387 bnxt_reset_task(bp, silent);
11388 bnxt_rtnl_unlock_sp(bp);
11389}
11390
8fbf58e1
MC
11391/* Only called from bnxt_sp_task() */
11392static void bnxt_rx_ring_reset(struct bnxt *bp)
11393{
11394 int i;
11395
11396 bnxt_rtnl_lock_sp(bp);
11397 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11398 bnxt_rtnl_unlock_sp(bp);
11399 return;
11400 }
11401 /* Disable and flush TPA before resetting the RX ring */
11402 if (bp->flags & BNXT_FLAG_TPA)
11403 bnxt_set_tpa(bp, false);
11404 for (i = 0; i < bp->rx_nr_rings; i++) {
11405 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11406 struct bnxt_cp_ring_info *cpr;
11407 int rc;
11408
11409 if (!rxr->bnapi->in_reset)
11410 continue;
11411
11412 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11413 if (rc) {
11414 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11415 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11416 else
11417 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11418 rc);
8fb35cd3 11419 bnxt_reset_task(bp, true);
8fbf58e1
MC
11420 break;
11421 }
11422 bnxt_free_one_rx_ring_skbs(bp, i);
11423 rxr->rx_prod = 0;
11424 rxr->rx_agg_prod = 0;
11425 rxr->rx_sw_agg_prod = 0;
11426 rxr->rx_next_cons = 0;
11427 rxr->bnapi->in_reset = false;
11428 bnxt_alloc_one_rx_ring(bp, i);
11429 cpr = &rxr->bnapi->cp_ring;
8a27d4b9 11430 cpr->sw_stats.rx.rx_resets++;
8fbf58e1
MC
11431 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11432 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11433 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11434 }
11435 if (bp->flags & BNXT_FLAG_TPA)
11436 bnxt_set_tpa(bp, true);
11437 bnxt_rtnl_unlock_sp(bp);
11438}
11439
230d1f0d
MC
11440static void bnxt_fw_reset_close(struct bnxt *bp)
11441{
f3a6d206 11442 bnxt_ulp_stop(bp);
4f036b2e
MC
11443 /* When firmware is in fatal state, quiesce device and disable
11444 * bus master to prevent any potential bad DMAs before freeing
11445 * kernel memory.
d4073028 11446 */
4f036b2e 11447 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
dab62e7c
MC
11448 u16 val = 0;
11449
11450 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11451 if (val == 0xffff)
11452 bp->fw_reset_min_dsecs = 0;
4f036b2e
MC
11453 bnxt_tx_disable(bp);
11454 bnxt_disable_napi(bp);
11455 bnxt_disable_int_sync(bp);
11456 bnxt_free_irq(bp);
11457 bnxt_clear_int_mode(bp);
d4073028 11458 pci_disable_device(bp->pdev);
4f036b2e 11459 }
230d1f0d 11460 __bnxt_close_nic(bp, true, false);
ac797ced 11461 bnxt_vf_reps_free(bp);
230d1f0d
MC
11462 bnxt_clear_int_mode(bp);
11463 bnxt_hwrm_func_drv_unrgtr(bp);
d4073028
VV
11464 if (pci_is_enabled(bp->pdev))
11465 pci_disable_device(bp->pdev);
230d1f0d
MC
11466 bnxt_free_ctx_mem(bp);
11467 kfree(bp->ctx);
11468 bp->ctx = NULL;
11469}
11470
acfb50e4
VV
11471static bool is_bnxt_fw_ok(struct bnxt *bp)
11472{
11473 struct bnxt_fw_health *fw_health = bp->fw_health;
11474 bool no_heartbeat = false, has_reset = false;
11475 u32 val;
11476
11477 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11478 if (val == fw_health->last_fw_heartbeat)
11479 no_heartbeat = true;
11480
11481 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11482 if (val != fw_health->last_fw_reset_cnt)
11483 has_reset = true;
11484
11485 if (!no_heartbeat && has_reset)
11486 return true;
11487
11488 return false;
11489}
11490
d1db9e16
MC
11491/* rtnl_lock is acquired before calling this function */
11492static void bnxt_force_fw_reset(struct bnxt *bp)
11493{
11494 struct bnxt_fw_health *fw_health = bp->fw_health;
30e96f48 11495 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
d1db9e16
MC
11496 u32 wait_dsecs;
11497
11498 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11499 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11500 return;
11501
30e96f48
MC
11502 if (ptp) {
11503 spin_lock_bh(&ptp->ptp_lock);
11504 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11505 spin_unlock_bh(&ptp->ptp_lock);
11506 } else {
11507 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11508 }
d1db9e16
MC
11509 bnxt_fw_reset_close(bp);
11510 wait_dsecs = fw_health->master_func_wait_dsecs;
11511 if (fw_health->master) {
11512 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11513 wait_dsecs = 0;
11514 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11515 } else {
11516 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11517 wait_dsecs = fw_health->normal_func_wait_dsecs;
11518 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11519 }
4037eb71
VV
11520
11521 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
d1db9e16
MC
11522 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11523 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11524}
11525
11526void bnxt_fw_exception(struct bnxt *bp)
11527{
a2b31e27 11528 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
d1db9e16
MC
11529 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11530 bnxt_rtnl_lock_sp(bp);
11531 bnxt_force_fw_reset(bp);
11532 bnxt_rtnl_unlock_sp(bp);
11533}
11534
e72cb7d6
MC
11535/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11536 * < 0 on error.
11537 */
11538static int bnxt_get_registered_vfs(struct bnxt *bp)
230d1f0d 11539{
e72cb7d6 11540#ifdef CONFIG_BNXT_SRIOV
230d1f0d
MC
11541 int rc;
11542
e72cb7d6
MC
11543 if (!BNXT_PF(bp))
11544 return 0;
11545
11546 rc = bnxt_hwrm_func_qcfg(bp);
11547 if (rc) {
11548 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11549 return rc;
11550 }
11551 if (bp->pf.registered_vfs)
11552 return bp->pf.registered_vfs;
11553 if (bp->sriov_cfg)
11554 return 1;
11555#endif
11556 return 0;
11557}
11558
11559void bnxt_fw_reset(struct bnxt *bp)
11560{
230d1f0d
MC
11561 bnxt_rtnl_lock_sp(bp);
11562 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11563 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
30e96f48 11564 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4037eb71 11565 int n = 0, tmo;
e72cb7d6 11566
30e96f48
MC
11567 if (ptp) {
11568 spin_lock_bh(&ptp->ptp_lock);
11569 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11570 spin_unlock_bh(&ptp->ptp_lock);
11571 } else {
11572 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11573 }
e72cb7d6
MC
11574 if (bp->pf.active_vfs &&
11575 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11576 n = bnxt_get_registered_vfs(bp);
11577 if (n < 0) {
11578 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11579 n);
11580 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11581 dev_close(bp->dev);
11582 goto fw_reset_exit;
11583 } else if (n > 0) {
11584 u16 vf_tmo_dsecs = n * 10;
11585
11586 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11587 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11588 bp->fw_reset_state =
11589 BNXT_FW_RESET_STATE_POLL_VF;
11590 bnxt_queue_fw_reset_work(bp, HZ / 10);
11591 goto fw_reset_exit;
230d1f0d
MC
11592 }
11593 bnxt_fw_reset_close(bp);
4037eb71
VV
11594 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11595 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11596 tmo = HZ / 10;
11597 } else {
11598 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11599 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11600 }
11601 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d
MC
11602 }
11603fw_reset_exit:
11604 bnxt_rtnl_unlock_sp(bp);
11605}
11606
ffd77621
MC
11607static void bnxt_chk_missed_irq(struct bnxt *bp)
11608{
11609 int i;
11610
11611 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11612 return;
11613
11614 for (i = 0; i < bp->cp_nr_rings; i++) {
11615 struct bnxt_napi *bnapi = bp->bnapi[i];
11616 struct bnxt_cp_ring_info *cpr;
11617 u32 fw_ring_id;
11618 int j;
11619
11620 if (!bnapi)
11621 continue;
11622
11623 cpr = &bnapi->cp_ring;
11624 for (j = 0; j < 2; j++) {
11625 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11626 u32 val[2];
11627
11628 if (!cpr2 || cpr2->has_more_work ||
11629 !bnxt_has_work(bp, cpr2))
11630 continue;
11631
11632 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11633 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11634 continue;
11635 }
11636 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11637 bnxt_dbg_hwrm_ring_info_get(bp,
11638 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11639 fw_ring_id, &val[0], &val[1]);
9d8b5f05 11640 cpr->sw_stats.cmn.missed_irqs++;
ffd77621
MC
11641 }
11642 }
11643}
11644
c0c050c5
MC
11645static void bnxt_cfg_ntp_filters(struct bnxt *);
11646
8119e49b
MC
11647static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11648{
11649 struct bnxt_link_info *link_info = &bp->link_info;
11650
11651 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11652 link_info->autoneg = BNXT_AUTONEG_SPEED;
11653 if (bp->hwrm_spec_code >= 0x10201) {
11654 if (link_info->auto_pause_setting &
11655 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11656 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11657 } else {
11658 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11659 }
11660 link_info->advertising = link_info->auto_link_speeds;
d058426e 11661 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
8119e49b
MC
11662 } else {
11663 link_info->req_link_speed = link_info->force_link_speed;
d058426e
EP
11664 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11665 if (link_info->force_pam4_link_speed) {
11666 link_info->req_link_speed =
11667 link_info->force_pam4_link_speed;
11668 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11669 }
8119e49b
MC
11670 link_info->req_duplex = link_info->duplex_setting;
11671 }
11672 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11673 link_info->req_flow_ctrl =
11674 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11675 else
11676 link_info->req_flow_ctrl = link_info->force_pause_setting;
11677}
11678
df97b34d
MC
11679static void bnxt_fw_echo_reply(struct bnxt *bp)
11680{
11681 struct bnxt_fw_health *fw_health = bp->fw_health;
bbf33d1d
EP
11682 struct hwrm_func_echo_response_input *req;
11683 int rc;
df97b34d 11684
bbf33d1d
EP
11685 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
11686 if (rc)
11687 return;
11688 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11689 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11690 hwrm_req_send(bp, req);
df97b34d
MC
11691}
11692
c0c050c5
MC
11693static void bnxt_sp_task(struct work_struct *work)
11694{
11695 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
c0c050c5 11696
4cebdcec
MC
11697 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11698 smp_mb__after_atomic();
11699 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11700 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5 11701 return;
4cebdcec 11702 }
c0c050c5
MC
11703
11704 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11705 bnxt_cfg_rx_mode(bp);
11706
11707 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11708 bnxt_cfg_ntp_filters(bp);
c0c050c5
MC
11709 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11710 bnxt_hwrm_exec_fwd_req(bp);
00db3cba 11711 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
531d1d26
MC
11712 bnxt_hwrm_port_qstats(bp, 0);
11713 bnxt_hwrm_port_qstats_ext(bp, 0);
fea6b333 11714 bnxt_accumulate_all_stats(bp);
00db3cba 11715 }
3bdf56c4 11716
0eaa24b9 11717 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
e2dc9b6e 11718 int rc;
0eaa24b9 11719
e2dc9b6e 11720 mutex_lock(&bp->link_lock);
0eaa24b9
MC
11721 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11722 &bp->sp_event))
11723 bnxt_hwrm_phy_qcaps(bp);
11724
e2dc9b6e 11725 rc = bnxt_update_link(bp, true);
0eaa24b9
MC
11726 if (rc)
11727 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11728 rc);
ca0c7538
VV
11729
11730 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11731 &bp->sp_event))
11732 bnxt_init_ethtool_link_settings(bp);
11733 mutex_unlock(&bp->link_lock);
0eaa24b9 11734 }
a1ef4a79
MC
11735 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11736 int rc;
11737
11738 mutex_lock(&bp->link_lock);
11739 rc = bnxt_update_phy_setting(bp);
11740 mutex_unlock(&bp->link_lock);
11741 if (rc) {
11742 netdev_warn(bp->dev, "update phy settings retry failed\n");
11743 } else {
11744 bp->link_info.phy_retry = false;
11745 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11746 }
11747 }
90c694bb 11748 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
e2dc9b6e
MC
11749 mutex_lock(&bp->link_lock);
11750 bnxt_get_port_module_status(bp);
11751 mutex_unlock(&bp->link_lock);
90c694bb 11752 }
5a84acbe
SP
11753
11754 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11755 bnxt_tc_flow_stats_work(bp);
11756
ffd77621
MC
11757 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11758 bnxt_chk_missed_irq(bp);
11759
df97b34d
MC
11760 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11761 bnxt_fw_echo_reply(bp);
11762
e2dc9b6e
MC
11763 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
11764 * must be the last functions to be called before exiting.
11765 */
6988bd92
MC
11766 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11767 bnxt_reset(bp, false);
4cebdcec 11768
fc0f1929
MC
11769 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11770 bnxt_reset(bp, true);
11771
8fbf58e1
MC
11772 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11773 bnxt_rx_ring_reset(bp);
11774
657a33c8
VV
11775 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11776 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11777
acfb50e4
VV
11778 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11779 if (!is_bnxt_fw_ok(bp))
11780 bnxt_devlink_health_report(bp,
11781 BNXT_FW_EXCEPTION_SP_EVENT);
11782 }
11783
4cebdcec
MC
11784 smp_mb__before_atomic();
11785 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5
MC
11786}
11787
d1e7925e 11788/* Under rtnl_lock */
98fdbe73
MC
11789int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11790 int tx_xdp)
d1e7925e
MC
11791{
11792 int max_rx, max_tx, tx_sets = 1;
780baad4 11793 int tx_rings_needed, stats;
8f23d638 11794 int rx_rings = rx;
6fc2ffdf 11795 int cp, vnics, rc;
d1e7925e 11796
d1e7925e
MC
11797 if (tcs)
11798 tx_sets = tcs;
11799
11800 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11801 if (rc)
11802 return rc;
11803
11804 if (max_rx < rx)
11805 return -ENOMEM;
11806
5f449249 11807 tx_rings_needed = tx * tx_sets + tx_xdp;
d1e7925e
MC
11808 if (max_tx < tx_rings_needed)
11809 return -ENOMEM;
11810
6fc2ffdf 11811 vnics = 1;
9b3d15e6 11812 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
6fc2ffdf
EW
11813 vnics += rx_rings;
11814
8f23d638
MC
11815 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11816 rx_rings <<= 1;
11817 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
780baad4
VV
11818 stats = cp;
11819 if (BNXT_NEW_RM(bp)) {
11c3ec7b 11820 cp += bnxt_get_ulp_msix_num(bp);
780baad4
VV
11821 stats += bnxt_get_ulp_stat_ctxs(bp);
11822 }
6fc2ffdf 11823 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
780baad4 11824 stats, vnics);
d1e7925e
MC
11825}
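/* Editor's note: a worked example of the arithmetic above, assuming a PF with
 * tx = 4, rx = 4, tcs = 2, tx_xdp = 0, sh = true, aRFS and aggregation rings
 * enabled, and not a P5 chip (hypothetical numbers, for illustration only):
 *
 *   tx_sets         = tcs                      = 2
 *   tx_rings_needed = tx * tx_sets + tx_xdp    = 4 * 2 + 0 = 8
 *   vnics           = 1 + rx (aRFS)            = 5
 *   rx_rings        = rx << 1 (agg rings)      = 8
 *   cp              = max(tx_rings_needed, rx) = 8
 *   stats           = cp                       = 8
 *
 * so the firmware is asked whether 8 TX rings, 8 RX rings (with aggregation),
 * 8 completion rings, 8 stat contexts and 5 VNICs can be reserved.
 */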
11826
17086399
SP
11827static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11828{
11829 if (bp->bar2) {
11830 pci_iounmap(pdev, bp->bar2);
11831 bp->bar2 = NULL;
11832 }
11833
11834 if (bp->bar1) {
11835 pci_iounmap(pdev, bp->bar1);
11836 bp->bar1 = NULL;
11837 }
11838
11839 if (bp->bar0) {
11840 pci_iounmap(pdev, bp->bar0);
11841 bp->bar0 = NULL;
11842 }
11843}
11844
11845static void bnxt_cleanup_pci(struct bnxt *bp)
11846{
11847 bnxt_unmap_bars(bp, bp->pdev);
11848 pci_release_regions(bp->pdev);
f6824308
VV
11849 if (pci_is_enabled(bp->pdev))
11850 pci_disable_device(bp->pdev);
17086399
SP
11851}
11852
18775aa8
MC
11853static void bnxt_init_dflt_coal(struct bnxt *bp)
11854{
11855 struct bnxt_coal *coal;
11856
11857	/* Tick values in microseconds.
11858 * 1 coal_buf x bufs_per_record = 1 completion record.
11859 */
11860 coal = &bp->rx_coal;
0c2ff8d7 11861 coal->coal_ticks = 10;
18775aa8
MC
11862 coal->coal_bufs = 30;
11863 coal->coal_ticks_irq = 1;
11864 coal->coal_bufs_irq = 2;
05abe4dd 11865 coal->idle_thresh = 50;
18775aa8
MC
11866 coal->bufs_per_record = 2;
11867 coal->budget = 64; /* NAPI budget */
11868
11869 coal = &bp->tx_coal;
11870 coal->coal_ticks = 28;
11871 coal->coal_bufs = 30;
11872 coal->coal_ticks_irq = 2;
11873 coal->coal_bufs_irq = 2;
11874 coal->bufs_per_record = 1;
11875
11876 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11877}
11878
7c380918
MC
11879static int bnxt_fw_init_one_p1(struct bnxt *bp)
11880{
11881 int rc;
11882
11883 bp->fw_cap = 0;
11884 rc = bnxt_hwrm_ver_get(bp);
ba02629f
EP
11885 bnxt_try_map_fw_health_reg(bp);
11886 if (rc) {
b187e4ba
EP
11887 rc = bnxt_try_recover_fw(bp);
11888 if (rc)
11889 return rc;
11890 rc = bnxt_hwrm_ver_get(bp);
87f7ab8d
EP
11891 if (rc)
11892 return rc;
ba02629f 11893 }
7c380918 11894
4933f675
VV
11895 bnxt_nvm_cfg_ver_get(bp);
11896
7c380918
MC
11897 rc = bnxt_hwrm_func_reset(bp);
11898 if (rc)
11899 return -ENODEV;
11900
11901 bnxt_hwrm_fw_set_time(bp);
11902 return 0;
11903}
11904
11905static int bnxt_fw_init_one_p2(struct bnxt *bp)
11906{
11907 int rc;
11908
11909 /* Get the MAX capabilities for this function */
11910 rc = bnxt_hwrm_func_qcaps(bp);
11911 if (rc) {
11912 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11913 rc);
11914 return -ENODEV;
11915 }
11916
11917 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11918 if (rc)
11919 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11920 rc);
11921
3e9ec2bb
EP
11922 if (bnxt_alloc_fw_health(bp)) {
11923 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11924 } else {
11925 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11926 if (rc)
11927 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11928 rc);
11929 }
07f83d72 11930
2e882468 11931 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
7c380918
MC
11932 if (rc)
11933 return -ENODEV;
11934
11935 bnxt_hwrm_func_qcfg(bp);
11936 bnxt_hwrm_vnic_qcaps(bp);
11937 bnxt_hwrm_port_led_qcaps(bp);
11938 bnxt_ethtool_init(bp);
11939 bnxt_dcb_init(bp);
11940 return 0;
11941}
11942
ba642ab7
MC
11943static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11944{
11945 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11946 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11947 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11948 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11949 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
c66c06c5 11950 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
ba642ab7
MC
11951 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11952 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11953 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11954 }
11955}
11956
11957static void bnxt_set_dflt_rfs(struct bnxt *bp)
11958{
11959 struct net_device *dev = bp->dev;
11960
11961 dev->hw_features &= ~NETIF_F_NTUPLE;
11962 dev->features &= ~NETIF_F_NTUPLE;
11963 bp->flags &= ~BNXT_FLAG_RFS;
11964 if (bnxt_rfs_supported(bp)) {
11965 dev->hw_features |= NETIF_F_NTUPLE;
11966 if (bnxt_rfs_capable(bp)) {
11967 bp->flags |= BNXT_FLAG_RFS;
11968 dev->features |= NETIF_F_NTUPLE;
11969 }
11970 }
11971}
11972
11973static void bnxt_fw_init_one_p3(struct bnxt *bp)
11974{
11975 struct pci_dev *pdev = bp->pdev;
11976
11977 bnxt_set_dflt_rss_hash_type(bp);
11978 bnxt_set_dflt_rfs(bp);
11979
11980 bnxt_get_wol_settings(bp);
11981 if (bp->flags & BNXT_FLAG_WOL_CAP)
11982 device_set_wakeup_enable(&pdev->dev, bp->wol);
11983 else
11984 device_set_wakeup_capable(&pdev->dev, false);
11985
11986 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11987 bnxt_hwrm_coal_params_qcaps(bp);
11988}
11989
0afd6a4e
MC
11990static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11991
ec5d31e3
MC
11992static int bnxt_fw_init_one(struct bnxt *bp)
11993{
11994 int rc;
11995
11996 rc = bnxt_fw_init_one_p1(bp);
11997 if (rc) {
11998 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11999 return rc;
12000 }
12001 rc = bnxt_fw_init_one_p2(bp);
12002 if (rc) {
12003 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12004 return rc;
12005 }
0afd6a4e
MC
12006 rc = bnxt_probe_phy(bp, false);
12007 if (rc)
12008 return rc;
ec5d31e3
MC
12009 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12010 if (rc)
12011 return rc;
937f188c
VV
12012
12013 /* In case fw capabilities have changed, destroy the unneeded
12014 * reporters and create newly capable ones.
12015 */
12016 bnxt_dl_fw_reporters_destroy(bp, false);
12017 bnxt_dl_fw_reporters_create(bp);
ec5d31e3
MC
12018 bnxt_fw_init_one_p3(bp);
12019 return 0;
12020}
12021
cbb51067
MC
12022static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12023{
12024 struct bnxt_fw_health *fw_health = bp->fw_health;
12025 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12026 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12027 u32 reg_type, reg_off, delay_msecs;
12028
12029 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12030 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12031 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12032 switch (reg_type) {
12033 case BNXT_FW_HEALTH_REG_TYPE_CFG:
12034 pci_write_config_dword(bp->pdev, reg_off, val);
12035 break;
12036 case BNXT_FW_HEALTH_REG_TYPE_GRC:
12037 writel(reg_off & BNXT_GRC_BASE_MASK,
12038 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12039 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
df561f66 12040 fallthrough;
cbb51067
MC
12041 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12042 writel(val, bp->bar0 + reg_off);
12043 break;
12044 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12045 writel(val, bp->bar1 + reg_off);
12046 break;
12047 }
12048 if (delay_msecs) {
12049 pci_read_config_dword(bp->pdev, 0, &val);
12050 msleep(delay_msecs);
12051 }
12052}
12053
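/* Trigger the actual firmware reset: via OP-TEE when the firmware
 * advertises ERR_RECOVER_RELOAD, via the host register write sequence
 * when the recovery config selects the host, or by sending
 * HWRM_FW_RESET to the co-processor otherwise.
 */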
12054static void bnxt_reset_all(struct bnxt *bp)
12055{
12056 struct bnxt_fw_health *fw_health = bp->fw_health;
e07ab202
VV
12057 int i, rc;
12058
12059 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
87f7ab8d 12060 bnxt_fw_reset_via_optee(bp);
e07ab202 12061 bp->fw_reset_timestamp = jiffies;
e07ab202
VV
12062 return;
12063 }
cbb51067
MC
12064
12065 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12066 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12067 bnxt_fw_reset_writel(bp, i);
12068 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
bbf33d1d
EP
12069 struct hwrm_fw_reset_input *req;
12070
12071 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12072 if (!rc) {
12073 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12074 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12075 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12076 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12077 rc = hwrm_req_send(bp, req);
12078 }
a2f3835c 12079 if (rc != -ENODEV)
cbb51067
MC
12080 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12081 }
12082 bp->fw_reset_timestamp = jiffies;
12083}
12084
339eeb4b
MC
12085static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12086{
12087 return time_after(jiffies, bp->fw_reset_timestamp +
12088 (bp->fw_reset_max_dsecs * HZ / 10));
12089}
12090
3958b1da
SK
12091static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12092{
12093 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12094 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12095 bnxt_ulp_start(bp, rc);
12096 bnxt_dl_health_status_update(bp, false);
12097 }
12098 bp->fw_reset_state = 0;
12099 dev_close(bp->dev);
12100}
12101
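/* Firmware reset state machine, run from delayed work.  Depending on
 * the firmware capabilities and whether this function is the recovery
 * master, the flow is roughly: POLL_VF (wait for VFs to unregister,
 * then close the nic) -> POLL_FW_DOWN (wait for firmware shutdown) ->
 * RESET_FW (master only) -> ENABLE_DEV (re-enable the PCI device) ->
 * POLL_FW (wait for firmware to answer HWRM again) -> OPENING (reopen
 * the nic and restart ULPs).  Each state requeues the work until it
 * can make progress or the reset times out.
 */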
230d1f0d
MC
12102static void bnxt_fw_reset_task(struct work_struct *work)
12103{
12104 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
3958b1da 12105 int rc = 0;
230d1f0d
MC
12106
12107 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12108 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12109 return;
12110 }
12111
12112 switch (bp->fw_reset_state) {
e72cb7d6
MC
12113 case BNXT_FW_RESET_STATE_POLL_VF: {
12114 int n = bnxt_get_registered_vfs(bp);
4037eb71 12115 int tmo;
e72cb7d6
MC
12116
12117 if (n < 0) {
230d1f0d 12118 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
e72cb7d6 12119 n, jiffies_to_msecs(jiffies -
230d1f0d
MC
12120 bp->fw_reset_timestamp));
12121 goto fw_reset_abort;
e72cb7d6 12122 } else if (n > 0) {
339eeb4b 12123 if (bnxt_fw_reset_timeout(bp)) {
230d1f0d
MC
12124 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12125 bp->fw_reset_state = 0;
e72cb7d6
MC
12126 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12127 n);
230d1f0d
MC
12128 return;
12129 }
12130 bnxt_queue_fw_reset_work(bp, HZ / 10);
12131 return;
12132 }
12133 bp->fw_reset_timestamp = jiffies;
12134 rtnl_lock();
6cd657cb 12135 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
3958b1da 12136 bnxt_fw_reset_abort(bp, rc);
6cd657cb 12137 rtnl_unlock();
3958b1da 12138 return;
6cd657cb 12139 }
230d1f0d 12140 bnxt_fw_reset_close(bp);
4037eb71
VV
12141 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12142 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12143 tmo = HZ / 10;
12144 } else {
12145 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12146 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12147 }
230d1f0d 12148 rtnl_unlock();
4037eb71 12149 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d 12150 return;
e72cb7d6 12151 }
4037eb71
VV
12152 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12153 u32 val;
12154
12155 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12156 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
339eeb4b 12157 !bnxt_fw_reset_timeout(bp)) {
4037eb71
VV
12158 bnxt_queue_fw_reset_work(bp, HZ / 5);
12159 return;
12160 }
12161
12162 if (!bp->fw_health->master) {
12163 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12164
12165 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12166 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12167 return;
12168 }
12169 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12170 }
df561f66 12171 fallthrough;
c6a9e7aa 12172 case BNXT_FW_RESET_STATE_RESET_FW:
cbb51067
MC
12173 bnxt_reset_all(bp);
12174 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
c6a9e7aa 12175 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
cbb51067 12176 return;
230d1f0d 12177 case BNXT_FW_RESET_STATE_ENABLE_DEV:
43a440c4 12178 bnxt_inv_fw_health_reg(bp);
bae8a003
VV
12179 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12180 !bp->fw_reset_min_dsecs) {
12181 u16 val;
12182
12183 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12184 if (val == 0xffff) {
12185 if (bnxt_fw_reset_timeout(bp)) {
12186 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
3958b1da 12187 rc = -ETIMEDOUT;
bae8a003 12188 goto fw_reset_abort;
dab62e7c 12189 }
bae8a003
VV
12190 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12191 return;
dab62e7c 12192 }
d1db9e16 12193 }
b4fff207 12194 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
230d1f0d
MC
12195 if (pci_enable_device(bp->pdev)) {
12196 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
3958b1da 12197 rc = -ENODEV;
230d1f0d
MC
12198 goto fw_reset_abort;
12199 }
12200 pci_set_master(bp->pdev);
12201 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
df561f66 12202 fallthrough;
230d1f0d
MC
12203 case BNXT_FW_RESET_STATE_POLL_FW:
12204 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
7b370ad7 12205 rc = bnxt_hwrm_poll(bp);
230d1f0d 12206 if (rc) {
339eeb4b 12207 if (bnxt_fw_reset_timeout(bp)) {
230d1f0d 12208 netdev_err(bp->dev, "Firmware reset aborted\n");
fc8864e0 12209 goto fw_reset_abort_status;
230d1f0d
MC
12210 }
12211 bnxt_queue_fw_reset_work(bp, HZ / 5);
12212 return;
12213 }
12214 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12215 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
df561f66 12216 fallthrough;
230d1f0d
MC
12217 case BNXT_FW_RESET_STATE_OPENING:
12218 while (!rtnl_trylock()) {
12219 bnxt_queue_fw_reset_work(bp, HZ / 10);
12220 return;
12221 }
12222 rc = bnxt_open(bp->dev);
12223 if (rc) {
3958b1da
SK
12224 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12225 bnxt_fw_reset_abort(bp, rc);
12226 rtnl_unlock();
12227 return;
230d1f0d 12228 }
230d1f0d 12229
eca4cf12
MC
12230 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
12231 bp->fw_health->enabled) {
12232 bp->fw_health->last_fw_reset_cnt =
12233 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12234 }
230d1f0d
MC
12235 bp->fw_reset_state = 0;
12236 /* Make sure fw_reset_state is 0 before clearing the flag */
12237 smp_mb__before_atomic();
12238 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
758684e4
SK
12239 bnxt_ulp_start(bp, 0);
12240 bnxt_reenable_sriov(bp);
ac797ced
SB
12241 bnxt_vf_reps_alloc(bp);
12242 bnxt_vf_reps_open(bp);
9e518f25 12243 bnxt_ptp_reapply_pps(bp);
737d7a6c 12244 bnxt_dl_health_recovery_done(bp);
e4e38237 12245 bnxt_dl_health_status_update(bp, true);
f3a6d206 12246 rtnl_unlock();
230d1f0d
MC
12247 break;
12248 }
12249 return;
12250
fc8864e0
MC
12251fw_reset_abort_status:
12252 if (bp->fw_health->status_reliable ||
12253 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12254 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12255
12256 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12257 }
230d1f0d 12258fw_reset_abort:
230d1f0d 12259 rtnl_lock();
3958b1da 12260 bnxt_fw_reset_abort(bp, rc);
230d1f0d
MC
12261 rtnl_unlock();
12262}
12263
c0c050c5
MC
12264static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12265{
12266 int rc;
12267 struct bnxt *bp = netdev_priv(dev);
12268
12269 SET_NETDEV_DEV(dev, &pdev->dev);
12270
12271 /* enable device (incl. PCI PM wakeup), and bus-mastering */
12272 rc = pci_enable_device(pdev);
12273 if (rc) {
12274 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12275 goto init_err;
12276 }
12277
12278 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12279 dev_err(&pdev->dev,
12280 "Cannot find PCI device base address, aborting\n");
12281 rc = -ENODEV;
12282 goto init_err_disable;
12283 }
12284
12285 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12286 if (rc) {
12287 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12288 goto init_err_disable;
12289 }
12290
12291 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12292 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12293 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
3383176e 12294 rc = -EIO;
c54bc3ce 12295 goto init_err_release;
c0c050c5
MC
12296 }
12297
12298 pci_set_master(pdev);
12299
12300 bp->dev = dev;
12301 bp->pdev = pdev;
12302
8ae24738
MC
12303 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12304 * determines the BAR size.
12305 */
c0c050c5
MC
12306 bp->bar0 = pci_ioremap_bar(pdev, 0);
12307 if (!bp->bar0) {
12308 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12309 rc = -ENOMEM;
12310 goto init_err_release;
12311 }
12312
c0c050c5
MC
12313 bp->bar2 = pci_ioremap_bar(pdev, 4);
12314 if (!bp->bar2) {
12315 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12316 rc = -ENOMEM;
12317 goto init_err_release;
12318 }
12319
6316ea6d
SB
12320 pci_enable_pcie_error_reporting(pdev);
12321
c0c050c5 12322 INIT_WORK(&bp->sp_task, bnxt_sp_task);
230d1f0d 12323 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
c0c050c5
MC
12324
12325 spin_lock_init(&bp->ntp_fltr_lock);
697197e5
MC
12326#if BITS_PER_LONG == 32
12327 spin_lock_init(&bp->db_lock);
12328#endif
c0c050c5
MC
12329
12330 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12331 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12332
18775aa8 12333 bnxt_init_dflt_coal(bp);
51f30785 12334
e99e88a9 12335 timer_setup(&bp->timer, bnxt_timer, 0);
c0c050c5
MC
12336 bp->current_interval = BNXT_TIMER_INTERVAL;
12337
442a35a5
JK
12338 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12339 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12340
caefe526 12341 clear_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
12342 return 0;
12343
12344init_err_release:
17086399 12345 bnxt_unmap_bars(bp, pdev);
c0c050c5
MC
12346 pci_release_regions(pdev);
12347
12348init_err_disable:
12349 pci_disable_device(pdev);
12350
12351init_err:
12352 return rc;
12353}
12354
12355/* rtnl_lock held */
12356static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12357{
12358 struct sockaddr *addr = p;
1fc2cfd0
JH
12359 struct bnxt *bp = netdev_priv(dev);
12360 int rc = 0;
c0c050c5
MC
12361
12362 if (!is_valid_ether_addr(addr->sa_data))
12363 return -EADDRNOTAVAIL;
12364
c1a7bdff
MC
12365 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12366 return 0;
12367
28ea334b 12368 rc = bnxt_approve_mac(bp, addr->sa_data, true);
84c33dd3
MC
12369 if (rc)
12370 return rc;
bdd4347b 12371
c0c050c5 12372 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1fc2cfd0
JH
12373 if (netif_running(dev)) {
12374 bnxt_close_nic(bp, false, false);
12375 rc = bnxt_open_nic(bp, false, false);
12376 }
c0c050c5 12377
1fc2cfd0 12378 return rc;
c0c050c5
MC
12379}
12380
12381/* rtnl_lock held */
12382static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12383{
12384 struct bnxt *bp = netdev_priv(dev);
12385
c0c050c5 12386 if (netif_running(dev))
a9b952d2 12387 bnxt_close_nic(bp, true, false);
c0c050c5
MC
12388
12389 dev->mtu = new_mtu;
12390 bnxt_set_ring_params(bp);
12391
12392 if (netif_running(dev))
a9b952d2 12393 return bnxt_open_nic(bp, true, false);
c0c050c5
MC
12394
12395 return 0;
12396}
12397
c5e3deb8 12398int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
c0c050c5
MC
12399{
12400 struct bnxt *bp = netdev_priv(dev);
3ffb6a39 12401 bool sh = false;
d1e7925e 12402 int rc;
16e5cc64 12403
c0c050c5 12404 if (tc > bp->max_tc) {
b451c8b6 12405 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
c0c050c5
MC
12406 tc, bp->max_tc);
12407 return -EINVAL;
12408 }
12409
12410 if (netdev_get_num_tc(dev) == tc)
12411 return 0;
12412
3ffb6a39
MC
12413 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12414 sh = true;
12415
98fdbe73
MC
12416 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12417 sh, tc, bp->tx_nr_rings_xdp);
d1e7925e
MC
12418 if (rc)
12419 return rc;
c0c050c5
MC
12420
12421 /* Need to close the device and redo the hw resource allocations */
12422 if (netif_running(bp->dev))
12423 bnxt_close_nic(bp, true, false);
12424
12425 if (tc) {
12426 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12427 netdev_set_num_tc(dev, tc);
12428 } else {
12429 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12430 netdev_reset_tc(dev);
12431 }
87e9b377 12432 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
3ffb6a39
MC
12433 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12434 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5
MC
12435
12436 if (netif_running(bp->dev))
12437 return bnxt_open_nic(bp, true, false);
12438
12439 return 0;
12440}
12441
9e0fd15d
JP
12442static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12443 void *cb_priv)
c5e3deb8 12444{
9e0fd15d 12445 struct bnxt *bp = cb_priv;
de4784ca 12446
312324f1
JK
12447 if (!bnxt_tc_flower_enabled(bp) ||
12448 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
38cf0426 12449 return -EOPNOTSUPP;
c5e3deb8 12450
9e0fd15d
JP
12451 switch (type) {
12452 case TC_SETUP_CLSFLOWER:
12453 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12454 default:
12455 return -EOPNOTSUPP;
12456 }
12457}
12458
627c89d0 12459LIST_HEAD(bnxt_block_cb_list);
955bcb6e 12460
2ae7408f
SP
12461static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12462 void *type_data)
12463{
4e95bc26
PNA
12464 struct bnxt *bp = netdev_priv(dev);
12465
2ae7408f 12466 switch (type) {
9e0fd15d 12467 case TC_SETUP_BLOCK:
955bcb6e
PNA
12468 return flow_block_cb_setup_simple(type_data,
12469 &bnxt_block_cb_list,
4e95bc26
PNA
12470 bnxt_setup_tc_block_cb,
12471 bp, bp, true);
575ed7d3 12472 case TC_SETUP_QDISC_MQPRIO: {
2ae7408f
SP
12473 struct tc_mqprio_qopt *mqprio = type_data;
12474
12475 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
56f36acd 12476
2ae7408f
SP
12477 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12478 }
12479 default:
12480 return -EOPNOTSUPP;
12481 }
c5e3deb8
MC
12482}
12483
c0c050c5
MC
12484#ifdef CONFIG_RFS_ACCEL
12485static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12486 struct bnxt_ntuple_filter *f2)
12487{
12488 struct flow_keys *keys1 = &f1->fkeys;
12489 struct flow_keys *keys2 = &f2->fkeys;
12490
6fc7caa8
MC
12491 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12492 keys1->basic.ip_proto != keys2->basic.ip_proto)
12493 return false;
12494
12495 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12496 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12497 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12498 return false;
12499 } else {
12500 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12501 sizeof(keys1->addrs.v6addrs.src)) ||
12502 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12503 sizeof(keys1->addrs.v6addrs.dst)))
12504 return false;
12505 }
12506
12507 if (keys1->ports.ports == keys2->ports.ports &&
61aad724 12508 keys1->control.flags == keys2->control.flags &&
a54c4d74
MC
12509 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12510 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
c0c050c5
MC
12511 return true;
12512
12513 return false;
12514}
12515
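/* aRFS flow steering callback (ndo_rx_flow_steer).  Resolves the
 * destination MAC to an L2 filter index, dissects the flow keys,
 * rejects flows the hardware cannot steer, and, if no matching filter
 * already exists, allocates a software ID from the ntuple bitmap and
 * queues sp_task work to program the filter in firmware.
 */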
12516static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12517 u16 rxq_index, u32 flow_id)
12518{
12519 struct bnxt *bp = netdev_priv(dev);
12520 struct bnxt_ntuple_filter *fltr, *new_fltr;
12521 struct flow_keys *fkeys;
12522 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
a54c4d74 12523 int rc = 0, idx, bit_id, l2_idx = 0;
c0c050c5 12524 struct hlist_head *head;
f47d0e19 12525 u32 flags;
c0c050c5 12526
a54c4d74
MC
12527 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12528 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12529 int off = 0, j;
12530
12531 netif_addr_lock_bh(dev);
12532 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12533 if (ether_addr_equal(eth->h_dest,
12534 vnic->uc_list + off)) {
12535 l2_idx = j + 1;
12536 break;
12537 }
12538 }
12539 netif_addr_unlock_bh(dev);
12540 if (!l2_idx)
12541 return -EINVAL;
12542 }
c0c050c5
MC
12543 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12544 if (!new_fltr)
12545 return -ENOMEM;
12546
12547 fkeys = &new_fltr->fkeys;
12548 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12549 rc = -EPROTONOSUPPORT;
12550 goto err_free;
12551 }
12552
dda0e746
MC
12553 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12554 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
c0c050c5
MC
12555 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12556 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12557 rc = -EPROTONOSUPPORT;
12558 goto err_free;
12559 }
dda0e746
MC
12560 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12561 bp->hwrm_spec_code < 0x10601) {
12562 rc = -EPROTONOSUPPORT;
12563 goto err_free;
12564 }
f47d0e19
MC
12565 flags = fkeys->control.flags;
12566 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12567 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
61aad724
MC
12568 rc = -EPROTONOSUPPORT;
12569 goto err_free;
12570 }
c0c050c5 12571
a54c4d74 12572 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
c0c050c5
MC
12573 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12574
12575 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12576 head = &bp->ntp_fltr_hash_tbl[idx];
12577 rcu_read_lock();
12578 hlist_for_each_entry_rcu(fltr, head, hash) {
12579 if (bnxt_fltr_match(fltr, new_fltr)) {
12580 rcu_read_unlock();
12581 rc = 0;
12582 goto err_free;
12583 }
12584 }
12585 rcu_read_unlock();
12586
12587 spin_lock_bh(&bp->ntp_fltr_lock);
84e86b98
MC
12588 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12589 BNXT_NTP_FLTR_MAX_FLTR, 0);
12590 if (bit_id < 0) {
c0c050c5
MC
12591 spin_unlock_bh(&bp->ntp_fltr_lock);
12592 rc = -ENOMEM;
12593 goto err_free;
12594 }
12595
84e86b98 12596 new_fltr->sw_id = (u16)bit_id;
c0c050c5 12597 new_fltr->flow_id = flow_id;
a54c4d74 12598 new_fltr->l2_fltr_idx = l2_idx;
c0c050c5
MC
12599 new_fltr->rxq = rxq_index;
12600 hlist_add_head_rcu(&new_fltr->hash, head);
12601 bp->ntp_fltr_count++;
12602 spin_unlock_bh(&bp->ntp_fltr_lock);
12603
12604 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
c213eae8 12605 bnxt_queue_sp_work(bp);
c0c050c5
MC
12606
12607 return new_fltr->sw_id;
12608
12609err_free:
12610 kfree(new_fltr);
12611 return rc;
12612}
12613
12614static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12615{
12616 int i;
12617
12618 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12619 struct hlist_head *head;
12620 struct hlist_node *tmp;
12621 struct bnxt_ntuple_filter *fltr;
12622 int rc;
12623
12624 head = &bp->ntp_fltr_hash_tbl[i];
12625 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12626 bool del = false;
12627
12628 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12629 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12630 fltr->flow_id,
12631 fltr->sw_id)) {
12632 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12633 fltr);
12634 del = true;
12635 }
12636 } else {
12637 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12638 fltr);
12639 if (rc)
12640 del = true;
12641 else
12642 set_bit(BNXT_FLTR_VALID, &fltr->state);
12643 }
12644
12645 if (del) {
12646 spin_lock_bh(&bp->ntp_fltr_lock);
12647 hlist_del_rcu(&fltr->hash);
12648 bp->ntp_fltr_count--;
12649 spin_unlock_bh(&bp->ntp_fltr_lock);
12650 synchronize_rcu();
12651 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12652 kfree(fltr);
12653 }
12654 }
12655 }
19241368 12656 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
9a005c38 12657 netdev_info(bp->dev, "Received PF driver unload event!\n");
c0c050c5
MC
12658}
12659
12660#else
12661
12662static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12663{
12664}
12665
12666#endif /* CONFIG_RFS_ACCEL */
12667
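/* udp_tunnel_nic sync callback: table 0 carries the single VXLAN port
 * and table 1 the single GENEVE port (see bnxt_udp_tunnels below).  A
 * non-zero port allocates the firmware tunnel dst port; a zero port
 * frees it.
 */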
442a35a5 12668static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
c0c050c5 12669{
442a35a5
JK
12670 struct bnxt *bp = netdev_priv(netdev);
12671 struct udp_tunnel_info ti;
12672 unsigned int cmd;
c0c050c5 12673
442a35a5 12674 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
7ae9dc35 12675 if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
442a35a5 12676 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
7ae9dc35 12677 else
442a35a5 12678 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
7cdd5fc3 12679
442a35a5
JK
12680 if (ti.port)
12681 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
ad51b8e9 12682
442a35a5 12683 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
c0c050c5
MC
12684}
12685
442a35a5
JK
12686static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12687 .sync_table = bnxt_udp_tunnel_sync,
12688 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12689 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12690 .tables = {
12691 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
12692 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12693 },
12694};
c0c050c5 12695
39d8ba2e
MC
12696static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12697 struct net_device *dev, u32 filter_mask,
12698 int nlflags)
12699{
12700 struct bnxt *bp = netdev_priv(dev);
12701
12702 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12703 nlflags, filter_mask, NULL);
12704}
12705
12706static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
2fd527b7 12707 u16 flags, struct netlink_ext_ack *extack)
39d8ba2e
MC
12708{
12709 struct bnxt *bp = netdev_priv(dev);
12710 struct nlattr *attr, *br_spec;
12711 int rem, rc = 0;
12712
12713 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12714 return -EOPNOTSUPP;
12715
12716 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12717 if (!br_spec)
12718 return -EINVAL;
12719
12720 nla_for_each_nested(attr, br_spec, rem) {
12721 u16 mode;
12722
12723 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12724 continue;
12725
12726 if (nla_len(attr) < sizeof(mode))
12727 return -EINVAL;
12728
12729 mode = nla_get_u16(attr);
12730 if (mode == bp->br_mode)
12731 break;
12732
12733 rc = bnxt_hwrm_set_br_mode(bp, mode);
12734 if (!rc)
12735 bp->br_mode = mode;
12736 break;
12737 }
12738 return rc;
12739}
12740
52d5254a
FF
12741int bnxt_get_port_parent_id(struct net_device *dev,
12742 struct netdev_phys_item_id *ppid)
c124a62f 12743{
52d5254a
FF
12744 struct bnxt *bp = netdev_priv(dev);
12745
c124a62f
SP
12746 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12747 return -EOPNOTSUPP;
12748
12749 /* The PF and its VF-reps only support the switchdev framework */
d061b241 12750 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
c124a62f
SP
12751 return -EOPNOTSUPP;
12752
b014232f
VV
12753 ppid->id_len = sizeof(bp->dsn);
12754 memcpy(ppid->id, bp->dsn, ppid->id_len);
c124a62f 12755
52d5254a 12756 return 0;
c124a62f
SP
12757}
12758
c9c49a65
JP
12759static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12760{
12761 struct bnxt *bp = netdev_priv(dev);
12762
12763 return &bp->dl_port;
12764}
12765
c0c050c5
MC
12766static const struct net_device_ops bnxt_netdev_ops = {
12767 .ndo_open = bnxt_open,
12768 .ndo_start_xmit = bnxt_start_xmit,
12769 .ndo_stop = bnxt_close,
12770 .ndo_get_stats64 = bnxt_get_stats64,
12771 .ndo_set_rx_mode = bnxt_set_rx_mode,
a7605370 12772 .ndo_eth_ioctl = bnxt_ioctl,
c0c050c5
MC
12773 .ndo_validate_addr = eth_validate_addr,
12774 .ndo_set_mac_address = bnxt_change_mac_addr,
12775 .ndo_change_mtu = bnxt_change_mtu,
12776 .ndo_fix_features = bnxt_fix_features,
12777 .ndo_set_features = bnxt_set_features,
1698d600 12778 .ndo_features_check = bnxt_features_check,
c0c050c5
MC
12779 .ndo_tx_timeout = bnxt_tx_timeout,
12780#ifdef CONFIG_BNXT_SRIOV
12781 .ndo_get_vf_config = bnxt_get_vf_config,
12782 .ndo_set_vf_mac = bnxt_set_vf_mac,
12783 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
12784 .ndo_set_vf_rate = bnxt_set_vf_bw,
12785 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
12786 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
746df139 12787 .ndo_set_vf_trust = bnxt_set_vf_trust,
c0c050c5
MC
12788#endif
12789 .ndo_setup_tc = bnxt_setup_tc,
12790#ifdef CONFIG_RFS_ACCEL
12791 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
12792#endif
f4e63525 12793 .ndo_bpf = bnxt_xdp,
f18c2b77 12794 .ndo_xdp_xmit = bnxt_xdp_xmit,
39d8ba2e
MC
12795 .ndo_bridge_getlink = bnxt_bridge_getlink,
12796 .ndo_bridge_setlink = bnxt_bridge_setlink,
c9c49a65 12797 .ndo_get_devlink_port = bnxt_get_devlink_port,
c0c050c5
MC
12798};
12799
12800static void bnxt_remove_one(struct pci_dev *pdev)
12801{
12802 struct net_device *dev = pci_get_drvdata(pdev);
12803 struct bnxt *bp = netdev_priv(dev);
12804
7e334fc8 12805 if (BNXT_PF(bp))
c0c050c5
MC
12806 bnxt_sriov_disable(bp);
12807
21d6a11e
VV
12808 if (BNXT_PF(bp))
12809 devlink_port_type_clear(&bp->dl_port);
93cb62d9 12810
a521c8a0 12811 bnxt_ptp_clear(bp);
21d6a11e
VV
12812 pci_disable_pcie_error_reporting(pdev);
12813 unregister_netdev(dev);
b16939b5 12814 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
21d6a11e 12815 /* Flush any pending tasks */
631ce27a
VV
12816 cancel_work_sync(&bp->sp_task);
12817 cancel_delayed_work_sync(&bp->fw_reset_task);
b16939b5
VV
12818 bp->sp_event = 0;
12819
7e334fc8 12820 bnxt_dl_fw_reporters_destroy(bp, true);
cda2cab0 12821 bnxt_dl_unregister(bp);
2ae7408f 12822 bnxt_shutdown_tc(bp);
c0c050c5 12823
7809592d 12824 bnxt_clear_int_mode(bp);
be58a0da 12825 bnxt_hwrm_func_drv_unrgtr(bp);
c0c050c5 12826 bnxt_free_hwrm_resources(bp);
eb513658 12827 bnxt_ethtool_free(bp);
7df4ae9f 12828 bnxt_dcb_free(bp);
a588e458
MC
12829 kfree(bp->edev);
12830 bp->edev = NULL;
ae5c42f0
MC
12831 kfree(bp->ptp_cfg);
12832 bp->ptp_cfg = NULL;
8280b38e
VV
12833 kfree(bp->fw_health);
12834 bp->fw_health = NULL;
c20dc142 12835 bnxt_cleanup_pci(bp);
98f04cf0
MC
12836 bnxt_free_ctx_mem(bp);
12837 kfree(bp->ctx);
12838 bp->ctx = NULL;
1667cbf6
MC
12839 kfree(bp->rss_indir_tbl);
12840 bp->rss_indir_tbl = NULL;
fd3ab1c7 12841 bnxt_free_port_stats(bp);
c0c050c5 12842 free_netdev(dev);
c0c050c5
MC
12843}
12844
ba642ab7 12845static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
c0c050c5
MC
12846{
12847 int rc = 0;
12848 struct bnxt_link_info *link_info = &bp->link_info;
c0c050c5 12849
b0d28207 12850 bp->phy_flags = 0;
170ce013
MC
12851 rc = bnxt_hwrm_phy_qcaps(bp);
12852 if (rc) {
12853 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12854 rc);
12855 return rc;
12856 }
dade5e15
MC
12857 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12858 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12859 else
12860 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
43a5107d
MC
12861 if (!fw_dflt)
12862 return 0;
12863
3c10ed49 12864 mutex_lock(&bp->link_lock);
c0c050c5
MC
12865 rc = bnxt_update_link(bp, false);
12866 if (rc) {
3c10ed49 12867 mutex_unlock(&bp->link_lock);
c0c050c5
MC
12868 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12869 rc);
12870 return rc;
12871 }
12872
93ed8117
MC
12873 /* Older firmware does not have supported_auto_speeds, so assume
12874 * that all supported speeds can be autonegotiated.
12875 */
12876 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12877 link_info->support_auto_speeds = link_info->support_speeds;
12878
8119e49b 12879 bnxt_init_ethtool_link_settings(bp);
3c10ed49 12880 mutex_unlock(&bp->link_lock);
ba642ab7 12881 return 0;
c0c050c5
MC
12882}
12883
12884static int bnxt_get_max_irq(struct pci_dev *pdev)
12885{
12886 u16 ctrl;
12887
12888 if (!pdev->msix_cap)
12889 return 1;
12890
12891 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12892 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12893}
12894
6e6c5a57
MC
12895static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12896 int *max_cp)
c0c050c5 12897{
6a4f2947 12898 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
e30fbc33 12899 int max_ring_grps = 0, max_irq;
c0c050c5 12900
6a4f2947
MC
12901 *max_tx = hw_resc->max_tx_rings;
12902 *max_rx = hw_resc->max_rx_rings;
e30fbc33
MC
12903 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12904 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12905 bnxt_get_ulp_msix_num(bp),
c027c6b4 12906 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
e30fbc33
MC
12907 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12908 *max_cp = min_t(int, *max_cp, max_irq);
6a4f2947 12909 max_ring_grps = hw_resc->max_hw_ring_grps;
76595193
PS
12910 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12911 *max_cp -= 1;
12912 *max_rx -= 2;
12913 }
c0c050c5
MC
12914 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12915 *max_rx >>= 1;
e30fbc33
MC
12916 if (bp->flags & BNXT_FLAG_CHIP_P5) {
12917 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12918 /* On P5 chips, the max_cp output param should be the number of available NQs */
12919 *max_cp = max_irq;
12920 }
b72d4a68 12921 *max_rx = min_t(int, *max_rx, max_ring_grps);
6e6c5a57
MC
12922}
12923
12924int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12925{
12926 int rx, tx, cp;
12927
12928 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
78f058a4
MC
12929 *max_rx = rx;
12930 *max_tx = tx;
6e6c5a57
MC
12931 if (!rx || !tx || !cp)
12932 return -ENOMEM;
12933
6e6c5a57
MC
12934 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12935}
12936
e4060d30
MC
12937static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12938 bool shared)
12939{
12940 int rc;
12941
12942 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
bdbd1eb5
MC
12943 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12944 /* Not enough rings, try disabling agg rings. */
12945 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12946 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
07f4fde5
MC
12947 if (rc) {
12948 /* set BNXT_FLAG_AGG_RINGS back for consistency */
12949 bp->flags |= BNXT_FLAG_AGG_RINGS;
bdbd1eb5 12950 return rc;
07f4fde5 12951 }
bdbd1eb5 12952 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
1054aee8
MC
12953 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12954 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bdbd1eb5
MC
12955 bnxt_set_ring_params(bp);
12956 }
e4060d30
MC
12957
12958 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12959 int max_cp, max_stat, max_irq;
12960
12961 /* Reserve minimum resources for RoCE */
12962 max_cp = bnxt_get_max_func_cp_rings(bp);
12963 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12964 max_irq = bnxt_get_max_func_irqs(bp);
12965 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12966 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12967 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12968 return 0;
12969
12970 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12971 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12972 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12973 max_cp = min_t(int, max_cp, max_irq);
12974 max_cp = min_t(int, max_cp, max_stat);
12975 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12976 if (rc)
12977 rc = 0;
12978 }
12979 return rc;
12980}
12981
58ea801a
MC
12982/* In the initial default shared ring setting, each shared ring must have
12983 * an RX/TX ring pair.
12984 */
12985static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12986{
12987 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12988 bp->rx_nr_rings = bp->cp_nr_rings;
12989 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12990 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12991}
12992
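/* Pick the default ring counts: one ring in a kdump kernel, otherwise
 * the default RSS queue count capped by what the hardware can provide
 * and by the online CPU count per port, then reserve the rings with
 * firmware and trim again if the reservation came back smaller.
 */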
702c221c 12993static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
6e6c5a57
MC
12994{
12995 int dflt_rings, max_rx_rings, max_tx_rings, rc;
6e6c5a57 12996
2773dfb2
MC
12997 if (!bnxt_can_reserve_rings(bp))
12998 return 0;
12999
6e6c5a57
MC
13000 if (sh)
13001 bp->flags |= BNXT_FLAG_SHARED_RINGS;
d629522e 13002 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
1d3ef13d
MC
13003 /* Reduce default rings on multi-port cards so that total default
13004 * rings do not exceed CPU count.
13005 */
13006 if (bp->port_count > 1) {
13007 int max_rings =
13008 max_t(int, num_online_cpus() / bp->port_count, 1);
13009
13010 dflt_rings = min_t(int, dflt_rings, max_rings);
13011 }
e4060d30 13012 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6e6c5a57
MC
13013 if (rc)
13014 return rc;
13015 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13016 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
58ea801a
MC
13017 if (sh)
13018 bnxt_trim_dflt_sh_rings(bp);
13019 else
13020 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13021 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
391be5c2 13022
674f50a5 13023 rc = __bnxt_reserve_rings(bp);
391be5c2
MC
13024 if (rc)
13025 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
58ea801a
MC
13026 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13027 if (sh)
13028 bnxt_trim_dflt_sh_rings(bp);
391be5c2 13029
674f50a5
MC
13030 /* Rings may have been trimmed, re-reserve the trimmed rings. */
13031 if (bnxt_need_reserve_rings(bp)) {
13032 rc = __bnxt_reserve_rings(bp);
13033 if (rc)
13034 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13035 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13036 }
76595193
PS
13037 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13038 bp->rx_nr_rings++;
13039 bp->cp_nr_rings++;
13040 }
5d765a5e
VV
13041 if (rc) {
13042 bp->tx_nr_rings = 0;
13043 bp->rx_nr_rings = 0;
13044 }
6e6c5a57 13045 return rc;
c0c050c5
MC
13046}
13047
47558acd
MC
13048static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13049{
13050 int rc;
13051
13052 if (bp->tx_nr_rings)
13053 return 0;
13054
6b95c3e9
MC
13055 bnxt_ulp_irq_stop(bp);
13056 bnxt_clear_int_mode(bp);
47558acd
MC
13057 rc = bnxt_set_dflt_rings(bp, true);
13058 if (rc) {
13059 netdev_err(bp->dev, "Not enough rings available.\n");
6b95c3e9 13060 goto init_dflt_ring_err;
47558acd
MC
13061 }
13062 rc = bnxt_init_int_mode(bp);
13063 if (rc)
6b95c3e9
MC
13064 goto init_dflt_ring_err;
13065
47558acd
MC
13066 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13067 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
13068 bp->flags |= BNXT_FLAG_RFS;
13069 bp->dev->features |= NETIF_F_NTUPLE;
13070 }
6b95c3e9
MC
13071init_dflt_ring_err:
13072 bnxt_ulp_irq_restart(bp, rc);
13073 return rc;
47558acd
MC
13074}
13075
80fcaf46 13076int bnxt_restore_pf_fw_resources(struct bnxt *bp)
7b08f661 13077{
80fcaf46
MC
13078 int rc;
13079
7b08f661
MC
13080 ASSERT_RTNL();
13081 bnxt_hwrm_func_qcaps(bp);
1a037782
VD
13082
13083 if (netif_running(bp->dev))
13084 __bnxt_close_nic(bp, true, false);
13085
ec86f14e 13086 bnxt_ulp_irq_stop(bp);
80fcaf46
MC
13087 bnxt_clear_int_mode(bp);
13088 rc = bnxt_init_int_mode(bp);
ec86f14e 13089 bnxt_ulp_irq_restart(bp, rc);
1a037782
VD
13090
13091 if (netif_running(bp->dev)) {
13092 if (rc)
13093 dev_close(bp->dev);
13094 else
13095 rc = bnxt_open_nic(bp, true, false);
13096 }
13097
80fcaf46 13098 return rc;
7b08f661
MC
13099}
13100
a22a6ac2
MC
13101static int bnxt_init_mac_addr(struct bnxt *bp)
13102{
13103 int rc = 0;
13104
13105 if (BNXT_PF(bp)) {
13106 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
13107 } else {
13108#ifdef CONFIG_BNXT_SRIOV
13109 struct bnxt_vf_info *vf = &bp->vf;
28ea334b 13110 bool strict_approval = true;
a22a6ac2
MC
13111
13112 if (is_valid_ether_addr(vf->mac_addr)) {
91cdda40 13113 /* overwrite netdev dev_addr with admin VF MAC */
a22a6ac2 13114 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
28ea334b
MC
13115 /* Older PF driver or firmware may not approve this
13116 * correctly.
13117 */
13118 strict_approval = false;
a22a6ac2
MC
13119 } else {
13120 eth_hw_addr_random(bp->dev);
a22a6ac2 13121 }
28ea334b 13122 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
a22a6ac2
MC
13123#endif
13124 }
13125 return rc;
13126}
13127
a0d0fd70
VV
13128static void bnxt_vpd_read_info(struct bnxt *bp)
13129{
13130 struct pci_dev *pdev = bp->pdev;
0ff25f6a
HK
13131 unsigned int vpd_size, kw_len;
13132 int pos, size;
a0d0fd70
VV
13133 u8 *vpd_data;
13134
550cd7c1
HK
13135 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
13136 if (IS_ERR(vpd_data)) {
13137 pci_warn(pdev, "Unable to read VPD\n");
a0d0fd70 13138 return;
4fd13157
DM
13139 }
13140
0ff25f6a
HK
13141 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13142 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
a0d0fd70
VV
13143 if (pos < 0)
13144 goto read_sn;
13145
0ff25f6a 13146 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
492adcf4 13147 memcpy(bp->board_partno, &vpd_data[pos], size);
a0d0fd70
VV
13148
13149read_sn:
0ff25f6a
HK
13150 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13151 PCI_VPD_RO_KEYWORD_SERIALNO,
13152 &kw_len);
a0d0fd70
VV
13153 if (pos < 0)
13154 goto exit;
13155
0ff25f6a 13156 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
492adcf4 13157 memcpy(bp->board_serialno, &vpd_data[pos], size);
a0d0fd70
VV
13158exit:
13159 kfree(vpd_data);
13160}
13161
03213a99
JP
13162static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13163{
13164 struct pci_dev *pdev = bp->pdev;
8d85b75b 13165 u64 qword;
03213a99 13166
8d85b75b
JK
13167 qword = pci_get_dsn(pdev);
13168 if (!qword) {
13169 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
03213a99
JP
13170 return -EOPNOTSUPP;
13171 }
13172
8d85b75b
JK
13173 put_unaligned_le64(qword, dsn);
13174
d061b241 13175 bp->flags |= BNXT_FLAG_DSN_VALID;
03213a99
JP
13176 return 0;
13177}
13178
8ae24738
MC
13179static int bnxt_map_db_bar(struct bnxt *bp)
13180{
13181 if (!bp->db_size)
13182 return -ENODEV;
13183 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13184 if (!bp->bar1)
13185 return -ENOMEM;
13186 return 0;
13187}
13188
c0c050c5
MC
13189static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13190{
c0c050c5
MC
13191 struct net_device *dev;
13192 struct bnxt *bp;
6e6c5a57 13193 int rc, max_irqs;
c0c050c5 13194
4e00338a 13195 if (pci_is_bridge(pdev))
fa853dda
PS
13196 return -ENODEV;
13197
8743db4a
VV
13198 /* Clear any pending DMA transactions from crash kernel
13199 * while loading driver in capture kernel.
13200 */
13201 if (is_kdump_kernel()) {
13202 pci_clear_master(pdev);
13203 pcie_flr(pdev);
13204 }
13205
c0c050c5
MC
13206 max_irqs = bnxt_get_max_irq(pdev);
13207 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13208 if (!dev)
13209 return -ENOMEM;
13210
13211 bp = netdev_priv(dev);
8fb35cd3 13212 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
9c1fabdf 13213 bnxt_set_max_func_irqs(bp, max_irqs);
c0c050c5
MC
13214
13215 if (bnxt_vf_pciid(ent->driver_data))
13216 bp->flags |= BNXT_FLAG_VF;
13217
2bcfa6f6 13218 if (pdev->msix_cap)
c0c050c5 13219 bp->flags |= BNXT_FLAG_MSIX_CAP;
c0c050c5
MC
13220
13221 rc = bnxt_init_board(pdev, dev);
13222 if (rc < 0)
13223 goto init_err_free;
13224
13225 dev->netdev_ops = &bnxt_netdev_ops;
13226 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13227 dev->ethtool_ops = &bnxt_ethtool_ops;
c0c050c5
MC
13228 pci_set_drvdata(pdev, dev);
13229
3e8060fa
PS
13230 rc = bnxt_alloc_hwrm_resources(bp);
13231 if (rc)
17086399 13232 goto init_err_pci_clean;
3e8060fa
PS
13233
13234 mutex_init(&bp->hwrm_cmd_lock);
ba642ab7 13235 mutex_init(&bp->link_lock);
7c380918
MC
13236
13237 rc = bnxt_fw_init_one_p1(bp);
3e8060fa 13238 if (rc)
17086399 13239 goto init_err_pci_clean;
3e8060fa 13240
3e3c09b0
VV
13241 if (BNXT_PF(bp))
13242 bnxt_vpd_read_info(bp);
13243
9d6b648c 13244 if (BNXT_CHIP_P5(bp)) {
e38287b7 13245 bp->flags |= BNXT_FLAG_CHIP_P5;
9d6b648c
MC
13246 if (BNXT_CHIP_SR2(bp))
13247 bp->flags |= BNXT_FLAG_CHIP_SR2;
13248 }
e38287b7 13249
5fa65524
EP
13250 rc = bnxt_alloc_rss_indir_tbl(bp);
13251 if (rc)
13252 goto init_err_pci_clean;
13253
7c380918 13254 rc = bnxt_fw_init_one_p2(bp);
3c2217a6
MC
13255 if (rc)
13256 goto init_err_pci_clean;
13257
8ae24738
MC
13258 rc = bnxt_map_db_bar(bp);
13259 if (rc) {
13260 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13261 rc);
13262 goto init_err_pci_clean;
13263 }
13264
c0c050c5
MC
13265 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13266 NETIF_F_TSO | NETIF_F_TSO6 |
13267 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7e13318d 13268 NETIF_F_GSO_IPXIP4 |
152971ee
AD
13269 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13270 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
3e8060fa
PS
13271 NETIF_F_RXCSUM | NETIF_F_GRO;
13272
e38287b7 13273 if (BNXT_SUPPORTS_TPA(bp))
3e8060fa 13274 dev->hw_features |= NETIF_F_LRO;
c0c050c5 13275
c0c050c5
MC
13276 dev->hw_enc_features =
13277 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13278 NETIF_F_TSO | NETIF_F_TSO6 |
13279 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
152971ee 13280 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7e13318d 13281 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
442a35a5
JK
13282 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13283
152971ee
AD
13284 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13285 NETIF_F_GSO_GRE_CSUM;
c0c050c5 13286 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
1da63ddd
EP
13287 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13288 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13289 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13290 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
e38287b7 13291 if (BNXT_SUPPORTS_TPA(bp))
1054aee8 13292 dev->hw_features |= NETIF_F_GRO_HW;
c0c050c5 13293 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
1054aee8
MC
13294 if (dev->features & NETIF_F_GRO_HW)
13295 dev->features &= ~NETIF_F_LRO;
c0c050c5
MC
13296 dev->priv_flags |= IFF_UNICAST_FLT;
13297
13298#ifdef CONFIG_BNXT_SRIOV
13299 init_waitqueue_head(&bp->sriov_cfg_wait);
4ab0c6a8 13300 mutex_init(&bp->sriov_lock);
c0c050c5 13301#endif
e38287b7
MC
13302 if (BNXT_SUPPORTS_TPA(bp)) {
13303 bp->gro_func = bnxt_gro_func_5730x;
67912c36 13304 if (BNXT_CHIP_P4(bp))
e38287b7 13305 bp->gro_func = bnxt_gro_func_5731x;
67912c36
MC
13306 else if (BNXT_CHIP_P5(bp))
13307 bp->gro_func = bnxt_gro_func_5750x;
e38287b7
MC
13308 }
13309 if (!BNXT_CHIP_P4_PLUS(bp))
434c975a 13310 bp->flags |= BNXT_FLAG_DOUBLE_DB;
309369c9 13311
a22a6ac2
MC
13312 rc = bnxt_init_mac_addr(bp);
13313 if (rc) {
13314 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13315 rc = -EADDRNOTAVAIL;
13316 goto init_err_pci_clean;
13317 }
c0c050c5 13318
2e9217d1
VV
13319 if (BNXT_PF(bp)) {
13320 /* Read the adapter's DSN to use as the eswitch switch_id */
b014232f 13321 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
2e9217d1 13322 }
567b2abe 13323
7eb9bb3a
MC
13324 /* MTU range: 60 - FW defined max */
13325 dev->min_mtu = ETH_ZLEN;
13326 dev->max_mtu = bp->max_mtu;
13327
ba642ab7 13328 rc = bnxt_probe_phy(bp, true);
d5430d31
MC
13329 if (rc)
13330 goto init_err_pci_clean;
13331
c61fb99c 13332 bnxt_set_rx_skb_mode(bp, false);
c0c050c5
MC
13333 bnxt_set_tpa_flags(bp);
13334 bnxt_set_ring_params(bp);
702c221c 13335 rc = bnxt_set_dflt_rings(bp, true);
bdbd1eb5
MC
13336 if (rc) {
13337 netdev_err(bp->dev, "Not enough rings available.\n");
13338 rc = -ENOMEM;
17086399 13339 goto init_err_pci_clean;
bdbd1eb5 13340 }
c0c050c5 13341
ba642ab7 13342 bnxt_fw_init_one_p3(bp);
2bcfa6f6 13343
a196e96b 13344 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
c0c050c5
MC
13345 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13346
7809592d 13347 rc = bnxt_init_int_mode(bp);
c0c050c5 13348 if (rc)
17086399 13349 goto init_err_pci_clean;
c0c050c5 13350
832aed16
MC
13351 /* No TC has been set yet and rings may have been trimmed due to
13352 * limited MSIX, so we re-initialize the TX rings per TC.
13353 */
13354 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13355
c213eae8
MC
13356 if (BNXT_PF(bp)) {
13357 if (!bnxt_pf_wq) {
13358 bnxt_pf_wq =
13359 create_singlethread_workqueue("bnxt_pf_wq");
13360 if (!bnxt_pf_wq) {
13361 dev_err(&pdev->dev, "Unable to create workqueue.\n");
b5f796b6 13362 rc = -ENOMEM;
c213eae8
MC
13363 goto init_err_pci_clean;
13364 }
13365 }
18c7015c
JK
13366 rc = bnxt_init_tc(bp);
13367 if (rc)
13368 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13369 rc);
c213eae8 13370 }
2ae7408f 13371
190eda1a 13372 bnxt_inv_fw_health_reg(bp);
cda2cab0
VV
13373 bnxt_dl_register(bp);
13374
7809592d
MC
13375 rc = register_netdev(dev);
13376 if (rc)
cda2cab0 13377 goto init_err_cleanup;
7809592d 13378
cda2cab0
VV
13379 if (BNXT_PF(bp))
13380 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
7e334fc8 13381 bnxt_dl_fw_reporters_create(bp);
4ab0c6a8 13382
c0c050c5
MC
13383 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13384 board_info[ent->driver_data].name,
13385 (long)pci_resource_start(pdev, 0), dev->dev_addr);
af125b75 13386 pcie_print_link_status(pdev);
90c4f788 13387
df3875ec 13388 pci_save_state(pdev);
c0c050c5
MC
13389 return 0;
13390
cda2cab0
VV
13391init_err_cleanup:
13392 bnxt_dl_unregister(bp);
2ae7408f 13393 bnxt_shutdown_tc(bp);
7809592d
MC
13394 bnxt_clear_int_mode(bp);
13395
17086399 13396init_err_pci_clean:
bdb38602 13397 bnxt_hwrm_func_drv_unrgtr(bp);
a2bf74f4 13398 bnxt_free_hwrm_resources(bp);
03400aaa 13399 bnxt_ethtool_free(bp);
a521c8a0 13400 bnxt_ptp_clear(bp);
ae5c42f0
MC
13401 kfree(bp->ptp_cfg);
13402 bp->ptp_cfg = NULL;
07f83d72
MC
13403 kfree(bp->fw_health);
13404 bp->fw_health = NULL;
17086399 13405 bnxt_cleanup_pci(bp);
62bfb932
MC
13406 bnxt_free_ctx_mem(bp);
13407 kfree(bp->ctx);
13408 bp->ctx = NULL;
1667cbf6
MC
13409 kfree(bp->rss_indir_tbl);
13410 bp->rss_indir_tbl = NULL;
c0c050c5
MC
13411
13412init_err_free:
13413 free_netdev(dev);
13414 return rc;
13415}
13416
d196ece7
MC
13417static void bnxt_shutdown(struct pci_dev *pdev)
13418{
13419 struct net_device *dev = pci_get_drvdata(pdev);
13420 struct bnxt *bp;
13421
13422 if (!dev)
13423 return;
13424
13425 rtnl_lock();
13426 bp = netdev_priv(dev);
13427 if (!bp)
13428 goto shutdown_exit;
13429
13430 if (netif_running(dev))
13431 dev_close(dev);
13432
a7f3f939 13433 bnxt_ulp_shutdown(bp);
5567ae4a
VV
13434 bnxt_clear_int_mode(bp);
13435 pci_disable_device(pdev);
a7f3f939 13436
d196ece7 13437 if (system_state == SYSTEM_POWER_OFF) {
d196ece7
MC
13438 pci_wake_from_d3(pdev, bp->wol);
13439 pci_set_power_state(pdev, PCI_D3hot);
13440 }
13441
13442shutdown_exit:
13443 rtnl_unlock();
13444}
13445
f65a2044
MC
13446#ifdef CONFIG_PM_SLEEP
13447static int bnxt_suspend(struct device *device)
13448{
f521eaa9 13449 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
13450 struct bnxt *bp = netdev_priv(dev);
13451 int rc = 0;
13452
13453 rtnl_lock();
6a68749d 13454 bnxt_ulp_stop(bp);
f65a2044
MC
13455 if (netif_running(dev)) {
13456 netif_device_detach(dev);
13457 rc = bnxt_close(dev);
13458 }
13459 bnxt_hwrm_func_drv_unrgtr(bp);
ef02af8c 13460 pci_disable_device(bp->pdev);
f9b69d7f
VV
13461 bnxt_free_ctx_mem(bp);
13462 kfree(bp->ctx);
13463 bp->ctx = NULL;
f65a2044
MC
13464 rtnl_unlock();
13465 return rc;
13466}
13467
13468static int bnxt_resume(struct device *device)
13469{
f521eaa9 13470 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
13471 struct bnxt *bp = netdev_priv(dev);
13472 int rc = 0;
13473
13474 rtnl_lock();
ef02af8c
MC
13475 rc = pci_enable_device(bp->pdev);
13476 if (rc) {
13477 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13478 rc);
13479 goto resume_exit;
13480 }
13481 pci_set_master(bp->pdev);
f92335d8 13482 if (bnxt_hwrm_ver_get(bp)) {
f65a2044
MC
13483 rc = -ENODEV;
13484 goto resume_exit;
13485 }
13486 rc = bnxt_hwrm_func_reset(bp);
13487 if (rc) {
13488 rc = -EBUSY;
13489 goto resume_exit;
13490 }
f92335d8 13491
2084ccf6
MC
13492 rc = bnxt_hwrm_func_qcaps(bp);
13493 if (rc)
f9b69d7f 13494 goto resume_exit;
f92335d8
VV
13495
13496 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13497 rc = -ENODEV;
13498 goto resume_exit;
13499 }
13500
f65a2044
MC
13501 bnxt_get_wol_settings(bp);
13502 if (netif_running(dev)) {
13503 rc = bnxt_open(dev);
13504 if (!rc)
13505 netif_device_attach(dev);
13506 }
13507
13508resume_exit:
6a68749d 13509 bnxt_ulp_start(bp, rc);
59ae2101
MC
13510 if (!rc)
13511 bnxt_reenable_sriov(bp);
f65a2044
MC
13512 rtnl_unlock();
13513 return rc;
13514}
13515
13516static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13517#define BNXT_PM_OPS (&bnxt_pm_ops)
13518
13519#else
13520
13521#define BNXT_PM_OPS NULL
13522
13523#endif /* CONFIG_PM_SLEEP */
13524
6316ea6d
SB
13525/**
13526 * bnxt_io_error_detected - called when PCI error is detected
13527 * @pdev: Pointer to PCI device
13528 * @state: The current pci connection state
13529 *
13530 * This function is called after a PCI bus error affecting
13531 * this device has been detected.
13532 */
13533static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13534 pci_channel_state_t state)
13535{
13536 struct net_device *netdev = pci_get_drvdata(pdev);
a588e458 13537 struct bnxt *bp = netdev_priv(netdev);
6316ea6d
SB
13538
13539 netdev_info(netdev, "PCI I/O error detected\n");
13540
13541 rtnl_lock();
13542 netif_device_detach(netdev);
13543
a588e458
MC
13544 bnxt_ulp_stop(bp);
13545
6316ea6d
SB
13546 if (state == pci_channel_io_perm_failure) {
13547 rtnl_unlock();
13548 return PCI_ERS_RESULT_DISCONNECT;
13549 }
13550
f75d9a0a
VV
13551 if (state == pci_channel_io_frozen)
13552 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13553
6316ea6d
SB
13554 if (netif_running(netdev))
13555 bnxt_close(netdev);
13556
c81cfb62
KA
13557 if (pci_is_enabled(pdev))
13558 pci_disable_device(pdev);
6e2f8388
MC
13559 bnxt_free_ctx_mem(bp);
13560 kfree(bp->ctx);
13561 bp->ctx = NULL;
6316ea6d
SB
13562 rtnl_unlock();
13563
13564 /* Request a slot reset. */
13565 return PCI_ERS_RESULT_NEED_RESET;
13566}
13567
13568/**
13569 * bnxt_io_slot_reset - called after the pci bus has been reset.
13570 * @pdev: Pointer to PCI device
13571 *
13572 * Restart the card from scratch, as if from a cold-boot.
13573 * At this point, the card has experienced a hard reset,
13574 * followed by fixups by BIOS, and has its config space
13575 * set up identically to what it was at cold boot.
13576 */
13577static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13578{
fb1e6e56 13579 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
6316ea6d
SB
13580 struct net_device *netdev = pci_get_drvdata(pdev);
13581 struct bnxt *bp = netdev_priv(netdev);
f75d9a0a 13582 int err = 0, off;
6316ea6d
SB
13583
13584 netdev_info(bp->dev, "PCI Slot Reset\n");
13585
13586 rtnl_lock();
13587
13588 if (pci_enable_device(pdev)) {
13589 dev_err(&pdev->dev,
13590 "Cannot re-enable PCI device after reset.\n");
13591 } else {
13592 pci_set_master(pdev);
f75d9a0a
VV
13593 /* Upon a fatal error, the device's internal logic that latches
13594 * the BAR values is reset and is restored only by rewriting
13595 * the BARs.
13596 *
13597 * As pci_restore_state() does not rewrite a BAR whose value matches
13598 * the previously saved value, the driver must first write the BARs
13599 * to 0 to force the restore after a fatal error.
13600 */
13601 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13602 &bp->state)) {
13603 for (off = PCI_BASE_ADDRESS_0;
13604 off <= PCI_BASE_ADDRESS_5; off += 4)
13605 pci_write_config_dword(bp->pdev, off, 0);
13606 }
df3875ec
VV
13607 pci_restore_state(pdev);
13608 pci_save_state(pdev);
6316ea6d 13609
aa8ed021 13610 err = bnxt_hwrm_func_reset(bp);
fb1e6e56 13611 if (!err)
6e2f8388 13612 result = PCI_ERS_RESULT_RECOVERED;
bae361c5 13613 }
6316ea6d
SB
13614
13615 rtnl_unlock();
13616
bae361c5 13617 return result;
6316ea6d
SB
13618}
13619
13620/**
13621 * bnxt_io_resume - called when traffic can start flowing again.
13622 * @pdev: Pointer to PCI device
13623 *
13624 * This callback is called when the error recovery driver tells
13625 * us that it's OK to resume normal operation.
13626 */
13627static void bnxt_io_resume(struct pci_dev *pdev)
13628{
13629 struct net_device *netdev = pci_get_drvdata(pdev);
fb1e6e56
VV
13630 struct bnxt *bp = netdev_priv(netdev);
13631 int err;
6316ea6d 13632
fb1e6e56 13633 netdev_info(bp->dev, "PCI Slot Resume\n");
6316ea6d
SB
13634 rtnl_lock();
13635
fb1e6e56
VV
13636 err = bnxt_hwrm_func_qcaps(bp);
13637 if (!err && netif_running(netdev))
13638 err = bnxt_open(netdev);
13639
13640 bnxt_ulp_start(bp, err);
13641 if (!err) {
13642 bnxt_reenable_sriov(bp);
13643 netif_device_attach(netdev);
13644 }
6316ea6d
SB
13645
13646 rtnl_unlock();
13647}
13648
13649static const struct pci_error_handlers bnxt_err_handler = {
13650 .error_detected = bnxt_io_error_detected,
13651 .slot_reset = bnxt_io_slot_reset,
13652 .resume = bnxt_io_resume
13653};
13654
c0c050c5
MC
13655static struct pci_driver bnxt_pci_driver = {
13656 .name = DRV_MODULE_NAME,
13657 .id_table = bnxt_pci_tbl,
13658 .probe = bnxt_init_one,
13659 .remove = bnxt_remove_one,
d196ece7 13660 .shutdown = bnxt_shutdown,
f65a2044 13661 .driver.pm = BNXT_PM_OPS,
6316ea6d 13662 .err_handler = &bnxt_err_handler,
c0c050c5
MC
13663#if defined(CONFIG_BNXT_SRIOV)
13664 .sriov_configure = bnxt_sriov_configure,
13665#endif
13666};
13667
c213eae8
MC
13668static int __init bnxt_init(void)
13669{
cabfb09d 13670 bnxt_debug_init();
c213eae8
MC
13671 return pci_register_driver(&bnxt_pci_driver);
13672}
13673
13674static void __exit bnxt_exit(void)
13675{
13676 pci_unregister_driver(&bnxt_pci_driver);
13677 if (bnxt_pf_wq)
13678 destroy_workqueue(bnxt_pf_wq);
cabfb09d 13679 bnxt_debug_exit();
c213eae8
MC
13680}
13681
13682module_init(bnxt_init);
13683module_exit(bnxt_exit);