drivers/net/ethernet/broadcom/bnxt/bnxt.c (blame listing at commit "bnxt_en: support multiple HWRM commands in flight")
1/* Broadcom NetXtreme-C/E network driver.
2 *
11f15ed3 3 * Copyright (c) 2014-2016 Broadcom Corporation
c6cc32a2 4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12
13#include <linux/stringify.h>
14#include <linux/kernel.h>
15#include <linux/timer.h>
16#include <linux/errno.h>
17#include <linux/ioport.h>
18#include <linux/slab.h>
19#include <linux/vmalloc.h>
20#include <linux/interrupt.h>
21#include <linux/pci.h>
22#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/skbuff.h>
25#include <linux/dma-mapping.h>
26#include <linux/bitops.h>
27#include <linux/io.h>
28#include <linux/irq.h>
29#include <linux/delay.h>
30#include <asm/byteorder.h>
31#include <asm/page.h>
32#include <linux/time.h>
33#include <linux/mii.h>
0ca12be9 34#include <linux/mdio.h>
35#include <linux/if.h>
36#include <linux/if_vlan.h>
32e8239c 37#include <linux/if_bridge.h>
5ac67d8b 38#include <linux/rtc.h>
c6d30e83 39#include <linux/bpf.h>
40#include <net/ip.h>
41#include <net/tcp.h>
42#include <net/udp.h>
43#include <net/checksum.h>
44#include <net/ip6_checksum.h>
ad51b8e9 45#include <net/udp_tunnel.h>
46#include <linux/workqueue.h>
47#include <linux/prefetch.h>
48#include <linux/cache.h>
49#include <linux/log2.h>
50#include <linux/aer.h>
51#include <linux/bitmap.h>
52#include <linux/ptp_clock_kernel.h>
53#include <linux/timecounter.h>
c0c050c5 54#include <linux/cpu_rmap.h>
56f0fd80 55#include <linux/cpumask.h>
2ae7408f 56#include <net/pkt_cls.h>
57#include <linux/hwmon.h>
58#include <linux/hwmon-sysfs.h>
322b87ca 59#include <net/page_pool.h>
60
61#include "bnxt_hsi.h"
62#include "bnxt.h"
3c8c20db 63#include "bnxt_hwrm.h"
a588e458 64#include "bnxt_ulp.h"
65#include "bnxt_sriov.h"
66#include "bnxt_ethtool.h"
7df4ae9f 67#include "bnxt_dcb.h"
c6d30e83 68#include "bnxt_xdp.h"
ae5c42f0 69#include "bnxt_ptp.h"
4ab0c6a8 70#include "bnxt_vfr.h"
2ae7408f 71#include "bnxt_tc.h"
3c467bf3 72#include "bnxt_devlink.h"
cabfb09d 73#include "bnxt_debugfs.h"
74
75#define BNXT_TX_TIMEOUT (5 * HZ)
76#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
77 NETIF_MSG_TX_ERR)
c0c050c5 78
79MODULE_LICENSE("GPL");
80MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
81
82#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
83#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
84#define BNXT_RX_COPY_THRESH 256
85
4419dbe6 86#define BNXT_TX_PUSH_THRESH 164
87
88enum board_idx {
fbc9a523 89 BCM57301,
90 BCM57302,
91 BCM57304,
1f681688 92 BCM57417_NPAR,
fa853dda 93 BCM58700,
94 BCM57311,
95 BCM57312,
fbc9a523 96 BCM57402,
97 BCM57404,
98 BCM57406,
99 BCM57402_NPAR,
100 BCM57407,
101 BCM57412,
102 BCM57414,
103 BCM57416,
104 BCM57417,
1f681688 105 BCM57412_NPAR,
5049e33b 106 BCM57314,
107 BCM57417_SFP,
108 BCM57416_SFP,
109 BCM57404_NPAR,
110 BCM57406_NPAR,
111 BCM57407_SFP,
adbc8305 112 BCM57407_NPAR,
113 BCM57414_NPAR,
114 BCM57416_NPAR,
115 BCM57452,
116 BCM57454,
92abef36 117 BCM5745x_NPAR,
1ab968d2 118 BCM57508,
c6cc32a2 119 BCM57504,
51fec80d 120 BCM57502,
121 BCM57508_NPAR,
122 BCM57504_NPAR,
123 BCM57502_NPAR,
4a58139b 124 BCM58802,
8ed693b7 125 BCM58804,
4a58139b 126 BCM58808,
127 NETXTREME_E_VF,
128 NETXTREME_C_VF,
618784e3 129 NETXTREME_S_VF,
130 NETXTREME_C_VF_HV,
131 NETXTREME_E_VF_HV,
b16b6891 132 NETXTREME_E_P5_VF,
7fbf359b 133 NETXTREME_E_P5_VF_HV,
134};
135
136/* indexed by enum above */
137static const struct {
138 char *name;
139} board_info[] = {
140 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
141 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
142 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
143 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
144 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
145 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
146 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
147 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
148 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
149 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
150 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
151 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
152 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
153 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
154 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
155 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
156 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
157 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
158 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
159 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
160 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
161 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
162 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
163 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
164 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
165 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
166 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
167 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
92abef36 168 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
1ab968d2 169 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
c6cc32a2 170 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
51fec80d 171 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
172 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
173 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
174 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
27573a7d 175 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
8ed693b7 176 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
177 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
178 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
179 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
618784e3 180 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
181 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
182 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
b16b6891 183 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
7fbf359b 184 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
185};
186
187static const struct pci_device_id bnxt_pci_tbl[] = {
188 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
189 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
4a58139b 190 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
adbc8305 191 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
fbc9a523 192 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
193 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
194 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
1f681688 195 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
fa853dda 196 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
197 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
198 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
fbc9a523 199 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
200 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
201 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
202 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
203 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
204 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
205 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
206 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
207 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
1f681688 208 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
5049e33b 209 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
210 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
211 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
212 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
213 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
214 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
215 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
216 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
1f681688 217 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
adbc8305 218 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
1f681688 219 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
adbc8305 220 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
4a58139b 221 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
32b40798 222 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
1ab968d2 223 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
c6cc32a2 224 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
51fec80d 225 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
226 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
227 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
228 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
229 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
230 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
231 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
4a58139b 232 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
8ed693b7 233 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
c0c050c5 234#ifdef CONFIG_BNXT_SRIOV
c7ef35eb 235 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
236 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
237 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
c7ef35eb 238 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
7fbf359b 239 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
adbc8305 240 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
241 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
242 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
243 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
244 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
245 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
246 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
247 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
248 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
249 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
7fbf359b 250 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
51fec80d 251 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
b16b6891 252 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
253 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
254 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
618784e3 255 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
256#endif
257 { 0 }
258};
259
260MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
261
262static const u16 bnxt_vf_req_snif[] = {
263 HWRM_FUNC_CFG,
91cdda40 264 HWRM_FUNC_VF_CFG,
265 HWRM_PORT_PHY_QCFG,
266 HWRM_CFA_L2_FILTER_ALLOC,
267};
268
25be8623 269static const u16 bnxt_async_events_arr[] = {
87c374de 270 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
b1613e78 271 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
272 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
273 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
274 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
275 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
b1613e78 276 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
2151fe08 277 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
7e914027 278 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
a44daa8f 279 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
68f684e2 280 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
8d4bd96b 281 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
df97b34d 282 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
099fdeda 283 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
abf90ac2 284 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
285};
286
287static struct workqueue_struct *bnxt_pf_wq;
288
289static bool bnxt_vf_pciid(enum board_idx idx)
290{
618784e3 291 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
7fbf359b 292 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
293 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
294 idx == NETXTREME_E_P5_VF_HV);
295}
296
297#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
298#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
299#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
300
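/* The doorbell helpers below pick the 64-bit NQ doorbell format on P5
 * chips (BNXT_FLAG_CHIP_P5) and fall back to the legacy 32-bit CQ
 * doorbell writes on older chips.
 */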
301#define BNXT_CP_DB_IRQ_DIS(db) \
302 writel(DB_CP_IRQ_DIS_FLAGS, db)
303
304#define BNXT_DB_CQ(db, idx) \
305 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
306
307#define BNXT_DB_NQ_P5(db, idx) \
308 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
309
310#define BNXT_DB_CQ_ARM(db, idx) \
311 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
312
313#define BNXT_DB_NQ_ARM_P5(db, idx) \
314 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
315
316static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
317{
318 if (bp->flags & BNXT_FLAG_CHIP_P5)
319 BNXT_DB_NQ_P5(db, idx);
320 else
321 BNXT_DB_CQ(db, idx);
322}
323
324static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
325{
326 if (bp->flags & BNXT_FLAG_CHIP_P5)
327 BNXT_DB_NQ_ARM_P5(db, idx);
328 else
329 BNXT_DB_CQ_ARM(db, idx);
330}
331
332static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
333{
334 if (bp->flags & BNXT_FLAG_CHIP_P5)
335 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
336 db->doorbell);
337 else
338 BNXT_DB_CQ(db, idx);
339}
340
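/* TX length hint table, indexed by packet length in 512-byte units
 * (bnxt_start_xmit() shifts the length right by 9 before the lookup).
 */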
38413406 341const u16 bnxt_lhint_arr[] = {
342 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
343 TX_BD_FLAGS_LHINT_512_TO_1023,
344 TX_BD_FLAGS_LHINT_1024_TO_2047,
345 TX_BD_FLAGS_LHINT_1024_TO_2047,
346 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
347 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
348 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
349 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
350 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
351 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
352 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
353 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
354 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
355 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
356 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
357 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
358 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
359 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
360 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
361};
362
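/* Fetch the CFA action (switch port id) from the skb's HW port metadata
 * dst, as used by the VF-representor TX path; 0 means no forwarding
 * action is attached.
 */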
363static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
364{
365 struct metadata_dst *md_dst = skb_metadata_dst(skb);
366
367 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
368 return 0;
369
370 return md_dst->u.port_info.port_id;
371}
372
373static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
374 u16 prod)
375{
376 bnxt_db_write(bp, &txr->tx_db, prod);
377 txr->kick_pending = 0;
378}
379
380static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
381 struct bnxt_tx_ring_info *txr,
382 struct netdev_queue *txq)
383{
384 netif_tx_stop_queue(txq);
385
386 /* netif_tx_stop_queue() must be done before checking
387 * tx index in bnxt_tx_avail() below, because in
388 * bnxt_tx_int(), we update tx index before checking for
389 * netif_tx_queue_stopped().
390 */
391 smp_mb();
392 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
393 netif_tx_wake_queue(txq);
394 return false;
395 }
396
397 return true;
398}
399
400static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
401{
402 struct bnxt *bp = netdev_priv(dev);
403 struct tx_bd *txbd;
404 struct tx_bd_ext *txbd1;
405 struct netdev_queue *txq;
406 int i;
407 dma_addr_t mapping;
408 unsigned int length, pad = 0;
409 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
410 u16 prod, last_frag;
411 struct pci_dev *pdev = bp->pdev;
412 struct bnxt_tx_ring_info *txr;
413 struct bnxt_sw_tx_bd *tx_buf;
dade5e15 414 __le32 lflags = 0;
415
416 i = skb_get_queue_mapping(skb);
417 if (unlikely(i >= bp->tx_nr_rings)) {
418 dev_kfree_skb_any(skb);
fb9f7190 419 atomic_long_inc(&dev->tx_dropped);
420 return NETDEV_TX_OK;
421 }
422
c0c050c5 423 txq = netdev_get_tx_queue(dev, i);
a960dec9 424 txr = &bp->tx_ring[bp->tx_ring_map[i]];
425 prod = txr->tx_prod;
426
427 free_size = bnxt_tx_avail(bp, txr);
428 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
429 /* We must have raced with NAPI cleanup */
430 if (net_ratelimit() && txr->kick_pending)
431 netif_warn(bp, tx_err, dev,
432 "bnxt: ring busy w/ flush pending!\n");
433 if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
434 return NETDEV_TX_BUSY;
435 }
436
437 length = skb->len;
438 len = skb_headlen(skb);
439 last_frag = skb_shinfo(skb)->nr_frags;
440
441 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
442
443 txbd->tx_bd_opaque = prod;
444
445 tx_buf = &txr->tx_buf_ring[prod];
446 tx_buf->skb = skb;
447 tx_buf->nr_frags = last_frag;
448
449 vlan_tag_flags = 0;
ee5c7fb3 450 cfa_action = bnxt_xmit_get_cfa_action(skb);
451 if (skb_vlan_tag_present(skb)) {
452 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
453 skb_vlan_tag_get(skb);
454 /* Currently supports 8021Q, 8021AD vlan offloads
455 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
456 */
457 if (skb->vlan_proto == htons(ETH_P_8021Q))
458 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
459 }
460
461 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
462 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
463
464 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
465 atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
466 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
467 &ptp->tx_hdr_off)) {
468 if (vlan_tag_flags)
469 ptp->tx_hdr_off += VLAN_HLEN;
470 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
471 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
472 } else {
473 atomic_inc(&bp->ptp_cfg->tx_avail);
474 }
475 }
476 }
477
478 if (unlikely(skb->no_fcs))
479 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
480
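/* Push mode: when the TX ring is empty and the packet is small enough,
 * the BDs and packet data are written directly through the doorbell
 * instead of being fetched by DMA.
 */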
481 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
482 !lflags) {
483 struct tx_push_buffer *tx_push_buf = txr->tx_push;
484 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
485 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
697197e5 486 void __iomem *db = txr->tx_db.doorbell;
487 void *pdata = tx_push_buf->data;
488 u64 *end;
489 int j, push_len;
490
491 /* Set COAL_NOW to be ready quickly for the next push */
492 tx_push->tx_bd_len_flags_type =
493 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
494 TX_BD_TYPE_LONG_TX_BD |
495 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
496 TX_BD_FLAGS_COAL_NOW |
497 TX_BD_FLAGS_PACKET_END |
498 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
499
500 if (skb->ip_summed == CHECKSUM_PARTIAL)
501 tx_push1->tx_bd_hsize_lflags =
502 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
503 else
504 tx_push1->tx_bd_hsize_lflags = 0;
505
506 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
507 tx_push1->tx_bd_cfa_action =
508 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
c0c050c5 509
510 end = pdata + length;
511 end = PTR_ALIGN(end, 8) - 1;
512 *end = 0;
513
514 skb_copy_from_linear_data(skb, pdata, len);
515 pdata += len;
516 for (j = 0; j < last_frag; j++) {
517 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
518 void *fptr;
519
520 fptr = skb_frag_address_safe(frag);
521 if (!fptr)
522 goto normal_tx;
523
524 memcpy(pdata, fptr, skb_frag_size(frag));
525 pdata += skb_frag_size(frag);
526 }
527
528 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
529 txbd->tx_bd_haddr = txr->data_mapping;
530 prod = NEXT_TX(prod);
531 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
532 memcpy(txbd, tx_push1, sizeof(*txbd));
533 prod = NEXT_TX(prod);
4419dbe6 534 tx_push->doorbell =
535 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
536 txr->tx_prod = prod;
537
b9a8460a 538 tx_buf->is_push = 1;
c0c050c5 539 netdev_tx_sent_queue(txq, skb->len);
b9a8460a 540 wmb(); /* Sync is_push and byte queue before pushing data */
c0c050c5 541
542 push_len = (length + sizeof(*tx_push) + 7) / 8;
543 if (push_len > 16) {
544 __iowrite64_copy(db, tx_push_buf, 16);
545 __iowrite32_copy(db + 4, tx_push_buf + 1,
9d13744b 546 (push_len - 16) << 1);
4419dbe6 547 } else {
697197e5 548 __iowrite64_copy(db, tx_push_buf, push_len);
4419dbe6 549 }
c0c050c5 550
551 goto tx_done;
552 }
553
554normal_tx:
555 if (length < BNXT_MIN_PKT_SIZE) {
556 pad = BNXT_MIN_PKT_SIZE - length;
e8d8c5d8 557 if (skb_pad(skb, pad))
c0c050c5 558 /* SKB already freed. */
e8d8c5d8 559 goto tx_kick_pending;
560 length = BNXT_MIN_PKT_SIZE;
561 }
562
563 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
564
565 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
566 goto tx_free;
567
568 dma_unmap_addr_set(tx_buf, mapping, mapping);
569 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
570 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
571
572 txbd->tx_bd_haddr = cpu_to_le64(mapping);
573
574 prod = NEXT_TX(prod);
575 txbd1 = (struct tx_bd_ext *)
576 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
577
dade5e15 578 txbd1->tx_bd_hsize_lflags = lflags;
579 if (skb_is_gso(skb)) {
580 u32 hdr_len;
581
582 if (skb->encapsulation)
583 hdr_len = skb_inner_network_offset(skb) +
584 skb_inner_network_header_len(skb) +
585 inner_tcp_hdrlen(skb);
586 else
587 hdr_len = skb_transport_offset(skb) +
588 tcp_hdrlen(skb);
589
dade5e15 590 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
591 TX_BD_FLAGS_T_IPID |
592 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
593 length = skb_shinfo(skb)->gso_size;
594 txbd1->tx_bd_mss = cpu_to_le32(length);
595 length += hdr_len;
596 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
dade5e15 597 txbd1->tx_bd_hsize_lflags |=
598 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
599 txbd1->tx_bd_mss = 0;
600 }
601
602 length >>= 9;
603 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
604 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
605 skb->len);
606 i = 0;
607 goto tx_dma_error;
608 }
609 flags |= bnxt_lhint_arr[length];
610 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
611
612 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
613 txbd1->tx_bd_cfa_action =
614 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
615 for (i = 0; i < last_frag; i++) {
616 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
617
618 prod = NEXT_TX(prod);
619 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
620
621 len = skb_frag_size(frag);
622 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
623 DMA_TO_DEVICE);
624
625 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
626 goto tx_dma_error;
627
628 tx_buf = &txr->tx_buf_ring[prod];
629 dma_unmap_addr_set(tx_buf, mapping, mapping);
630
631 txbd->tx_bd_haddr = cpu_to_le64(mapping);
632
633 flags = len << TX_BD_LEN_SHIFT;
634 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
635 }
636
637 flags &= ~TX_BD_LEN;
638 txbd->tx_bd_len_flags_type =
639 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
640 TX_BD_FLAGS_PACKET_END);
641
642 netdev_tx_sent_queue(txq, skb->len);
643
644 skb_tx_timestamp(skb);
645
646 /* Sync BD data before updating doorbell */
647 wmb();
648
649 prod = NEXT_TX(prod);
650 txr->tx_prod = prod;
651
6b16f9ee 652 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
653 bnxt_txr_db_kick(bp, txr, prod);
654 else
655 txr->kick_pending = 1;
656
657tx_done:
658
c0c050c5 659 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
6b16f9ee 660 if (netdev_xmit_more() && !tx_buf->is_push)
e8d8c5d8 661 bnxt_txr_db_kick(bp, txr, prod);
c0c050c5 662
3c603136 663 bnxt_txr_netif_try_stop_queue(bp, txr, txq);
664 }
665 return NETDEV_TX_OK;
666
667tx_dma_error:
668 if (BNXT_TX_PTP_IS_SET(lflags))
669 atomic_inc(&bp->ptp_cfg->tx_avail);
670
671 last_frag = i;
672
673 /* start back at beginning and unmap skb */
674 prod = txr->tx_prod;
675 tx_buf = &txr->tx_buf_ring[prod];
c0c050c5 676 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
df70303d 677 skb_headlen(skb), DMA_TO_DEVICE);
678 prod = NEXT_TX(prod);
679
680 /* unmap remaining mapped pages */
681 for (i = 0; i < last_frag; i++) {
682 prod = NEXT_TX(prod);
683 tx_buf = &txr->tx_buf_ring[prod];
684 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
685 skb_frag_size(&skb_shinfo(skb)->frags[i]),
df70303d 686 DMA_TO_DEVICE);
687 }
688
e8d8c5d8 689tx_free:
c0c050c5 690 dev_kfree_skb_any(skb);
691tx_kick_pending:
692 if (txr->kick_pending)
693 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
694 txr->tx_buf_ring[txr->tx_prod].skb = NULL;
fb9f7190 695 atomic_long_inc(&dev->tx_dropped);
696 return NETDEV_TX_OK;
697}
698
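/* TX completion: unmap and free up to nr_pkts transmitted packets,
 * update BQL accounting and re-wake the queue once enough descriptors
 * have been freed.
 */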
699static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
700{
b6ab4b01 701 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
a960dec9 702 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
703 u16 cons = txr->tx_cons;
704 struct pci_dev *pdev = bp->pdev;
705 int i;
706 unsigned int tx_bytes = 0;
707
708 for (i = 0; i < nr_pkts; i++) {
709 struct bnxt_sw_tx_bd *tx_buf;
83bb623c 710 bool compl_deferred = false;
711 struct sk_buff *skb;
712 int j, last;
713
714 tx_buf = &txr->tx_buf_ring[cons];
715 cons = NEXT_TX(cons);
716 skb = tx_buf->skb;
717 tx_buf->skb = NULL;
718
719 if (tx_buf->is_push) {
720 tx_buf->is_push = 0;
721 goto next_tx_int;
722 }
723
724 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
df70303d 725 skb_headlen(skb), DMA_TO_DEVICE);
726 last = tx_buf->nr_frags;
727
728 for (j = 0; j < last; j++) {
729 cons = NEXT_TX(cons);
730 tx_buf = &txr->tx_buf_ring[cons];
731 dma_unmap_page(
732 &pdev->dev,
733 dma_unmap_addr(tx_buf, mapping),
734 skb_frag_size(&skb_shinfo(skb)->frags[j]),
df70303d 735 DMA_TO_DEVICE);
c0c050c5 736 }
737 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
738 if (bp->flags & BNXT_FLAG_CHIP_P5) {
739 if (!bnxt_get_tx_ts_p5(bp, skb))
740 compl_deferred = true;
741 else
742 atomic_inc(&bp->ptp_cfg->tx_avail);
743 }
744 }
745
746next_tx_int:
747 cons = NEXT_TX(cons);
748
749 tx_bytes += skb->len;
750 if (!compl_deferred)
751 dev_kfree_skb_any(skb);
752 }
753
754 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
755 txr->tx_cons = cons;
756
757 /* Need to make the tx_cons update visible to bnxt_start_xmit()
758 * before checking for netif_tx_queue_stopped(). Without the
759 * memory barrier, there is a small possibility that bnxt_start_xmit()
760 * will miss it and cause the queue to be stopped forever.
761 */
762 smp_mb();
763
764 if (unlikely(netif_tx_queue_stopped(txq)) &&
765 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
766 READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
767 netif_tx_wake_queue(txq);
768}
769
c61fb99c 770static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
322b87ca 771 struct bnxt_rx_ring_info *rxr,
772 gfp_t gfp)
773{
774 struct device *dev = &bp->pdev->dev;
775 struct page *page;
776
322b87ca 777 page = page_pool_dev_alloc_pages(rxr->page_pool);
778 if (!page)
779 return NULL;
780
781 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
782 DMA_ATTR_WEAK_ORDERING);
c61fb99c 783 if (dma_mapping_error(dev, *mapping)) {
322b87ca 784 page_pool_recycle_direct(rxr->page_pool, page);
785 return NULL;
786 }
787 *mapping += bp->rx_dma_offset;
788 return page;
789}
790
791static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
792 gfp_t gfp)
793{
794 u8 *data;
795 struct pci_dev *pdev = bp->pdev;
796
797 data = kmalloc(bp->rx_buf_size, gfp);
798 if (!data)
799 return NULL;
800
801 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
802 bp->rx_buf_use_size, bp->rx_dir,
803 DMA_ATTR_WEAK_ORDERING);
804
805 if (dma_mapping_error(&pdev->dev, *mapping)) {
806 kfree(data);
807 data = NULL;
808 }
809 return data;
810}
811
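/* Allocate one RX buffer (a page in page mode, otherwise a kmalloc'd
 * data buffer), map it for DMA and program the RX descriptor at prod.
 */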
812int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
813 u16 prod, gfp_t gfp)
814{
815 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
816 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
817 dma_addr_t mapping;
818
c61fb99c 819 if (BNXT_RX_PAGE_MODE(bp)) {
820 struct page *page =
821 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
c0c050c5 822
823 if (!page)
824 return -ENOMEM;
825
826 rx_buf->data = page;
827 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
828 } else {
829 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
830
831 if (!data)
832 return -ENOMEM;
833
834 rx_buf->data = data;
835 rx_buf->data_ptr = data + bp->rx_offset;
836 }
11cd119d 837 rx_buf->mapping = mapping;
838
839 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
840 return 0;
841}
842
c6d30e83 843void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
844{
845 u16 prod = rxr->rx_prod;
846 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
847 struct rx_bd *cons_bd, *prod_bd;
848
849 prod_rx_buf = &rxr->rx_buf_ring[prod];
850 cons_rx_buf = &rxr->rx_buf_ring[cons];
851
852 prod_rx_buf->data = data;
6bb19474 853 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
c0c050c5 854
11cd119d 855 prod_rx_buf->mapping = cons_rx_buf->mapping;
856
857 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
858 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
859
860 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
861}
862
863static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
864{
865 u16 next, max = rxr->rx_agg_bmap_size;
866
867 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
868 if (next >= max)
869 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
870 return next;
871}
872
873static inline int bnxt_alloc_rx_page(struct bnxt *bp,
874 struct bnxt_rx_ring_info *rxr,
875 u16 prod, gfp_t gfp)
876{
877 struct rx_bd *rxbd =
878 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
879 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
880 struct pci_dev *pdev = bp->pdev;
881 struct page *page;
882 dma_addr_t mapping;
883 u16 sw_prod = rxr->rx_sw_agg_prod;
89d0a06c 884 unsigned int offset = 0;
c0c050c5 885
886 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
887 page = rxr->rx_page;
888 if (!page) {
889 page = alloc_page(gfp);
890 if (!page)
891 return -ENOMEM;
892 rxr->rx_page = page;
893 rxr->rx_page_offset = 0;
894 }
895 offset = rxr->rx_page_offset;
896 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
897 if (rxr->rx_page_offset == PAGE_SIZE)
898 rxr->rx_page = NULL;
899 else
900 get_page(page);
901 } else {
902 page = alloc_page(gfp);
903 if (!page)
904 return -ENOMEM;
905 }
c0c050c5 906
c519fe9a 907 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
df70303d 908 BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
c519fe9a 909 DMA_ATTR_WEAK_ORDERING);
910 if (dma_mapping_error(&pdev->dev, mapping)) {
911 __free_page(page);
912 return -EIO;
913 }
914
915 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
916 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
917
918 __set_bit(sw_prod, rxr->rx_agg_bmap);
919 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
920 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
921
922 rx_agg_buf->page = page;
89d0a06c 923 rx_agg_buf->offset = offset;
c0c050c5
MC
924 rx_agg_buf->mapping = mapping;
925 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
926 rxbd->rx_bd_opaque = sw_prod;
927 return 0;
928}
929
930static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
931 struct bnxt_cp_ring_info *cpr,
932 u16 cp_cons, u16 curr)
933{
934 struct rx_agg_cmp *agg;
935
936 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
937 agg = (struct rx_agg_cmp *)
938 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
939 return agg;
940}
941
942static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
943 struct bnxt_rx_ring_info *rxr,
944 u16 agg_id, u16 curr)
945{
946 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
947
948 return &tpa_info->agg_arr[curr];
949}
950
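/* Return the aggregation buffers described by a completion back to the
 * RX aggregation ring, e.g. when a packet is dropped or TPA is aborted.
 */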
951static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
952 u16 start, u32 agg_bufs, bool tpa)
c0c050c5 953{
e44758b7 954 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5 955 struct bnxt *bp = bnapi->bp;
b6ab4b01 956 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
957 u16 prod = rxr->rx_agg_prod;
958 u16 sw_prod = rxr->rx_sw_agg_prod;
bfcd8d79 959 bool p5_tpa = false;
960 u32 i;
961
962 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
963 p5_tpa = true;
964
965 for (i = 0; i < agg_bufs; i++) {
966 u16 cons;
967 struct rx_agg_cmp *agg;
968 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
969 struct rx_bd *prod_bd;
970 struct page *page;
971
972 if (p5_tpa)
973 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
974 else
975 agg = bnxt_get_agg(bp, cpr, idx, start + i);
976 cons = agg->rx_agg_cmp_opaque;
977 __clear_bit(cons, rxr->rx_agg_bmap);
978
979 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
980 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
981
982 __set_bit(sw_prod, rxr->rx_agg_bmap);
983 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
984 cons_rx_buf = &rxr->rx_agg_ring[cons];
985
986 /* It is possible for sw_prod to be equal to cons, so
987 * set cons_rx_buf->page to NULL first.
988 */
989 page = cons_rx_buf->page;
990 cons_rx_buf->page = NULL;
991 prod_rx_buf->page = page;
89d0a06c 992 prod_rx_buf->offset = cons_rx_buf->offset;
993
994 prod_rx_buf->mapping = cons_rx_buf->mapping;
995
996 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
997
998 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
999 prod_bd->rx_bd_opaque = sw_prod;
1000
1001 prod = NEXT_RX_AGG(prod);
1002 sw_prod = NEXT_RX_AGG(sw_prod);
1003 }
1004 rxr->rx_agg_prod = prod;
1005 rxr->rx_sw_agg_prod = sw_prod;
1006}
1007
1008static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1009 struct bnxt_rx_ring_info *rxr,
1010 u16 cons, void *data, u8 *data_ptr,
1011 dma_addr_t dma_addr,
1012 unsigned int offset_and_len)
1013{
1014 unsigned int payload = offset_and_len >> 16;
1015 unsigned int len = offset_and_len & 0xffff;
d7840976 1016 skb_frag_t *frag;
1017 struct page *page = data;
1018 u16 prod = rxr->rx_prod;
1019 struct sk_buff *skb;
1020 int off, err;
1021
1022 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1023 if (unlikely(err)) {
1024 bnxt_reuse_rx_data(rxr, cons, data);
1025 return NULL;
1026 }
1027 dma_addr -= bp->rx_dma_offset;
1028 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
1029 DMA_ATTR_WEAK_ORDERING);
3071c517 1030 page_pool_release_page(rxr->page_pool, page);
1031
1032 if (unlikely(!payload))
c43f1255 1033 payload = eth_get_headlen(bp->dev, data_ptr, len);
1034
1035 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1036 if (!skb) {
1037 __free_page(page);
1038 return NULL;
1039 }
1040
1041 off = (void *)data_ptr - page_address(page);
1042 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
1043 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1044 payload + NET_IP_ALIGN);
1045
1046 frag = &skb_shinfo(skb)->frags[0];
1047 skb_frag_size_sub(frag, payload);
b54c9d5b 1048 skb_frag_off_add(frag, payload);
1049 skb->data_len -= payload;
1050 skb->tail += payload;
1051
1052 return skb;
1053}
1054
1055static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1056 struct bnxt_rx_ring_info *rxr, u16 cons,
1057 void *data, u8 *data_ptr,
1058 dma_addr_t dma_addr,
1059 unsigned int offset_and_len)
c0c050c5 1060{
6bb19474 1061 u16 prod = rxr->rx_prod;
c0c050c5 1062 struct sk_buff *skb;
6bb19474 1063 int err;
1064
1065 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1066 if (unlikely(err)) {
1067 bnxt_reuse_rx_data(rxr, cons, data);
1068 return NULL;
1069 }
1070
1071 skb = build_skb(data, 0);
1072 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1073 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1074 if (!skb) {
1075 kfree(data);
1076 return NULL;
1077 }
1078
b3dba77c 1079 skb_reserve(skb, bp->rx_offset);
6bb19474 1080 skb_put(skb, offset_and_len & 0xffff);
1081 return skb;
1082}
1083
1084static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1085 struct bnxt_cp_ring_info *cpr,
1086 struct sk_buff *skb, u16 idx,
1087 u32 agg_bufs, bool tpa)
c0c050c5 1088{
e44758b7 1089 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5 1090 struct pci_dev *pdev = bp->pdev;
b6ab4b01 1091 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 1092 u16 prod = rxr->rx_agg_prod;
bfcd8d79 1093 bool p5_tpa = false;
1094 u32 i;
1095
1096 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1097 p5_tpa = true;
1098
1099 for (i = 0; i < agg_bufs; i++) {
1100 u16 cons, frag_len;
1101 struct rx_agg_cmp *agg;
1102 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1103 struct page *page;
1104 dma_addr_t mapping;
1105
1106 if (p5_tpa)
1107 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1108 else
1109 agg = bnxt_get_agg(bp, cpr, idx, i);
1110 cons = agg->rx_agg_cmp_opaque;
1111 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1112 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1113
1114 cons_rx_buf = &rxr->rx_agg_ring[cons];
1115 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1116 cons_rx_buf->offset, frag_len);
1117 __clear_bit(cons, rxr->rx_agg_bmap);
1118
1119 /* It is possible for bnxt_alloc_rx_page() to allocate
1120 * a sw_prod index that equals the cons index, so we
1121 * need to clear the cons entry now.
1122 */
11cd119d 1123 mapping = cons_rx_buf->mapping;
1124 page = cons_rx_buf->page;
1125 cons_rx_buf->page = NULL;
1126
1127 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1128 struct skb_shared_info *shinfo;
1129 unsigned int nr_frags;
1130
1131 shinfo = skb_shinfo(skb);
1132 nr_frags = --shinfo->nr_frags;
1133 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1134
1135 dev_kfree_skb(skb);
1136
1137 cons_rx_buf->page = page;
1138
1139 /* Update prod since possibly some pages have been
1140 * allocated already.
1141 */
1142 rxr->rx_agg_prod = prod;
4a228a3a 1143 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
c0c050c5
MC
1144 return NULL;
1145 }
1146
c519fe9a 1147 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
df70303d 1148 DMA_FROM_DEVICE,
c519fe9a 1149 DMA_ATTR_WEAK_ORDERING);
1150
1151 skb->data_len += frag_len;
1152 skb->len += frag_len;
1153 skb->truesize += PAGE_SIZE;
1154
1155 prod = NEXT_RX_AGG(prod);
1156 }
1157 rxr->rx_agg_prod = prod;
1158 return skb;
1159}
1160
1161static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1162 u8 agg_bufs, u32 *raw_cons)
1163{
1164 u16 last;
1165 struct rx_agg_cmp *agg;
1166
1167 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1168 last = RING_CMP(*raw_cons);
1169 agg = (struct rx_agg_cmp *)
1170 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1171 return RX_AGG_CMP_VALID(agg, *raw_cons);
1172}
1173
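/* Copy small packets (up to rx_copy_thresh) into a freshly allocated
 * skb so the original RX buffer can stay on the ring.
 */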
1174static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1175 unsigned int len,
1176 dma_addr_t mapping)
1177{
1178 struct bnxt *bp = bnapi->bp;
1179 struct pci_dev *pdev = bp->pdev;
1180 struct sk_buff *skb;
1181
1182 skb = napi_alloc_skb(&bnapi->napi, len);
1183 if (!skb)
1184 return NULL;
1185
1186 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1187 bp->rx_dir);
c0c050c5 1188
1189 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1190 len + NET_IP_ALIGN);
c0c050c5 1191
1192 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1193 bp->rx_dir);
1194
1195 skb_put(skb, len);
1196 return skb;
1197}
1198
e44758b7 1199static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1200 u32 *raw_cons, void *cmp)
1201{
1202 struct rx_cmp *rxcmp = cmp;
1203 u32 tmp_raw_cons = *raw_cons;
1204 u8 cmp_type, agg_bufs = 0;
1205
1206 cmp_type = RX_CMP_TYPE(rxcmp);
1207
1208 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1209 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1210 RX_CMP_AGG_BUFS) >>
1211 RX_CMP_AGG_BUFS_SHIFT;
1212 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1213 struct rx_tpa_end_cmp *tpa_end = cmp;
1214
1215 if (bp->flags & BNXT_FLAG_CHIP_P5)
1216 return 0;
1217
4a228a3a 1218 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1219 }
1220
1221 if (agg_bufs) {
1222 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1223 return -EBUSY;
1224 }
1225 *raw_cons = tmp_raw_cons;
1226 return 0;
1227}
1228
1229static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1230{
1231 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1232 return;
1233
1234 if (BNXT_PF(bp))
1235 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1236 else
1237 schedule_delayed_work(&bp->fw_reset_task, delay);
1238}
1239
1240static void bnxt_queue_sp_work(struct bnxt *bp)
1241{
1242 if (BNXT_PF(bp))
1243 queue_work(bnxt_pf_wq, &bp->sp_task);
1244 else
1245 schedule_work(&bp->sp_task);
1246}
1247
1248static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1249{
1250 if (!rxr->bnapi->in_reset) {
1251 rxr->bnapi->in_reset = true;
1252 if (bp->flags & BNXT_FLAG_CHIP_P5)
1253 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1254 else
1255 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
c213eae8 1256 bnxt_queue_sp_work(bp);
1257 }
1258 rxr->rx_next_cons = 0xffff;
1259}
1260
1261static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1262{
1263 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1264 u16 idx = agg_id & MAX_TPA_P5_MASK;
1265
1266 if (test_bit(idx, map->agg_idx_bmap))
1267 idx = find_first_zero_bit(map->agg_idx_bmap,
1268 BNXT_AGG_IDX_BMAP_SIZE);
1269 __set_bit(idx, map->agg_idx_bmap);
1270 map->agg_id_tbl[agg_id] = idx;
1271 return idx;
1272}
1273
1274static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1275{
1276 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1277
1278 __clear_bit(idx, map->agg_idx_bmap);
1279}
1280
1281static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1282{
1283 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1284
1285 return map->agg_id_tbl[agg_id];
1286}
1287
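/* A TPA_START completion opens a hardware-aggregated (GRO_HW/LRO) flow:
 * save the buffer, RSS hash, metadata and header offsets in
 * rx_tpa[agg_id] and repopulate the RX ring.
 */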
1288static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1289 struct rx_tpa_start_cmp *tpa_start,
1290 struct rx_tpa_start_cmp_ext *tpa_start1)
1291{
c0c050c5 1292 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1293 struct bnxt_tpa_info *tpa_info;
1294 u16 cons, prod, agg_id;
1295 struct rx_bd *prod_bd;
1296 dma_addr_t mapping;
1297
ec4d8e7c 1298 if (bp->flags & BNXT_FLAG_CHIP_P5) {
bfcd8d79 1299 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1300 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1301 } else {
bfcd8d79 1302 agg_id = TPA_START_AGG_ID(tpa_start);
ec4d8e7c 1303 }
1304 cons = tpa_start->rx_tpa_start_cmp_opaque;
1305 prod = rxr->rx_prod;
1306 cons_rx_buf = &rxr->rx_buf_ring[cons];
1307 prod_rx_buf = &rxr->rx_buf_ring[prod];
1308 tpa_info = &rxr->rx_tpa[agg_id];
1309
1310 if (unlikely(cons != rxr->rx_next_cons ||
1311 TPA_START_ERROR(tpa_start))) {
1312 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1313 cons, rxr->rx_next_cons,
1314 TPA_START_ERROR_CODE(tpa_start1));
1315 bnxt_sched_reset(bp, rxr);
1316 return;
1317 }
1318 /* Store cfa_code in tpa_info to use in tpa_end
1319 * completion processing.
1320 */
1321 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
c0c050c5 1322 prod_rx_buf->data = tpa_info->data;
6bb19474 1323 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1324
1325 mapping = tpa_info->mapping;
11cd119d 1326 prod_rx_buf->mapping = mapping;
1327
1328 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1329
1330 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1331
1332 tpa_info->data = cons_rx_buf->data;
6bb19474 1333 tpa_info->data_ptr = cons_rx_buf->data_ptr;
c0c050c5 1334 cons_rx_buf->data = NULL;
11cd119d 1335 tpa_info->mapping = cons_rx_buf->mapping;
1336
1337 tpa_info->len =
1338 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1339 RX_TPA_START_CMP_LEN_SHIFT;
1340 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1341 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1342
1343 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1344 tpa_info->gso_type = SKB_GSO_TCPV4;
1345 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
50f011b6 1346 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1347 tpa_info->gso_type = SKB_GSO_TCPV6;
1348 tpa_info->rss_hash =
1349 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1350 } else {
1351 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1352 tpa_info->gso_type = 0;
871127e6 1353 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1354 }
1355 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1356 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
94758f8d 1357 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
bfcd8d79 1358 tpa_info->agg_count = 0;
c0c050c5
MC
1359
1360 rxr->rx_prod = NEXT_RX(prod);
1361 cons = NEXT_RX(cons);
376a5b86 1362 rxr->rx_next_cons = NEXT_RX(cons);
1363 cons_rx_buf = &rxr->rx_buf_ring[cons];
1364
1365 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1366 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1367 cons_rx_buf->data = NULL;
1368}
1369
4a228a3a 1370static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1371{
1372 if (agg_bufs)
4a228a3a 1373 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1374}
1375
1376#ifdef CONFIG_INET
1377static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1378{
1379 struct udphdr *uh = NULL;
1380
1381 if (ip_proto == htons(ETH_P_IP)) {
1382 struct iphdr *iph = (struct iphdr *)skb->data;
1383
1384 if (iph->protocol == IPPROTO_UDP)
1385 uh = (struct udphdr *)(iph + 1);
1386 } else {
1387 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1388
1389 if (iph->nexthdr == IPPROTO_UDP)
1390 uh = (struct udphdr *)(iph + 1);
1391 }
1392 if (uh) {
1393 if (uh->check)
1394 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1395 else
1396 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1397 }
1398}
1399#endif
1400
1401static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1402 int payload_off, int tcp_ts,
1403 struct sk_buff *skb)
1404{
1405#ifdef CONFIG_INET
1406 struct tcphdr *th;
1407 int len, nw_off;
1408 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1409 u32 hdr_info = tpa_info->hdr_info;
1410 bool loopback = false;
1411
1412 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1413 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1414 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1415
1416 /* If the packet is an internal loopback packet, the offsets will
1417 * have an extra 4 bytes.
1418 */
1419 if (inner_mac_off == 4) {
1420 loopback = true;
1421 } else if (inner_mac_off > 4) {
1422 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1423 ETH_HLEN - 2));
1424
 1425 /* We only support inner IPv4/IPv6. If we don't see the
1426 * correct protocol ID, it must be a loopback packet where
1427 * the offsets are off by 4.
1428 */
09a7636a 1429 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1430 loopback = true;
1431 }
1432 if (loopback) {
1433 /* internal loopback packet, subtract all offsets by 4 */
1434 inner_ip_off -= 4;
1435 inner_mac_off -= 4;
1436 outer_ip_off -= 4;
1437 }
1438
1439 nw_off = inner_ip_off - ETH_HLEN;
1440 skb_set_network_header(skb, nw_off);
1441 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1442 struct ipv6hdr *iph = ipv6_hdr(skb);
1443
1444 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1445 len = skb->len - skb_transport_offset(skb);
1446 th = tcp_hdr(skb);
1447 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1448 } else {
1449 struct iphdr *iph = ip_hdr(skb);
1450
1451 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1452 len = skb->len - skb_transport_offset(skb);
1453 th = tcp_hdr(skb);
1454 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1455 }
1456
1457 if (inner_mac_off) { /* tunnel */
1458 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1459 ETH_HLEN - 2));
1460
bee5a188 1461 bnxt_gro_tunnel(skb, proto);
1462 }
1463#endif
1464 return skb;
1465}
1466
1467static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1468 int payload_off, int tcp_ts,
1469 struct sk_buff *skb)
1470{
1471#ifdef CONFIG_INET
1472 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1473 u32 hdr_info = tpa_info->hdr_info;
1474 int iphdr_len, nw_off;
1475
1476 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1477 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1478 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1479
1480 nw_off = inner_ip_off - ETH_HLEN;
1481 skb_set_network_header(skb, nw_off);
1482 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1483 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1484 skb_set_transport_header(skb, nw_off + iphdr_len);
1485
1486 if (inner_mac_off) { /* tunnel */
1487 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1488 ETH_HLEN - 2));
1489
1490 bnxt_gro_tunnel(skb, proto);
1491 }
1492#endif
1493 return skb;
1494}
1495
1496#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1497#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1498
1499static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1500 int payload_off, int tcp_ts,
1501 struct sk_buff *skb)
1502{
d1611c3a 1503#ifdef CONFIG_INET
c0c050c5 1504 struct tcphdr *th;
719ca811 1505 int len, nw_off, tcp_opt_len = 0;
27e24189 1506
309369c9 1507 if (tcp_ts)
1508 tcp_opt_len = 12;
1509
1510 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1511 struct iphdr *iph;
1512
1513 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1514 ETH_HLEN;
1515 skb_set_network_header(skb, nw_off);
1516 iph = ip_hdr(skb);
1517 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1518 len = skb->len - skb_transport_offset(skb);
1519 th = tcp_hdr(skb);
1520 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1521 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1522 struct ipv6hdr *iph;
1523
1524 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1525 ETH_HLEN;
1526 skb_set_network_header(skb, nw_off);
1527 iph = ipv6_hdr(skb);
1528 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1529 len = skb->len - skb_transport_offset(skb);
1530 th = tcp_hdr(skb);
1531 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1532 } else {
1533 dev_kfree_skb_any(skb);
1534 return NULL;
1535 }
c0c050c5 1536
1537 if (nw_off) /* tunnel */
1538 bnxt_gro_tunnel(skb, skb->protocol);
1539#endif
1540 return skb;
1541}
1542
1543static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1544 struct bnxt_tpa_info *tpa_info,
1545 struct rx_tpa_end_cmp *tpa_end,
1546 struct rx_tpa_end_cmp_ext *tpa_end1,
1547 struct sk_buff *skb)
1548{
1549#ifdef CONFIG_INET
1550 int payload_off;
1551 u16 segs;
1552
1553 segs = TPA_END_TPA_SEGS(tpa_end);
1554 if (segs == 1)
1555 return skb;
1556
1557 NAPI_GRO_CB(skb)->count = segs;
1558 skb_shinfo(skb)->gso_size =
1559 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1560 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1561 if (bp->flags & BNXT_FLAG_CHIP_P5)
1562 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1563 else
1564 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
309369c9 1565 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1566 if (likely(skb))
1567 tcp_gro_complete(skb);
1568#endif
1569 return skb;
1570}
1571
1572/* Given the cfa_code of a received packet determine which
1573 * netdev (vf-rep or PF) the packet is destined to.
1574 */
1575static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1576{
1577 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1578
 1579 /* if vf-rep dev is NULL, the packet must belong to the PF */
1580 return dev ? dev : bp->dev;
1581}
1582
c0c050c5 1583static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
e44758b7 1584 struct bnxt_cp_ring_info *cpr,
1585 u32 *raw_cons,
1586 struct rx_tpa_end_cmp *tpa_end,
1587 struct rx_tpa_end_cmp_ext *tpa_end1,
4e5dbbda 1588 u8 *event)
c0c050c5 1589{
e44758b7 1590 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1591 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6bb19474 1592 u8 *data_ptr, agg_bufs;
1593 unsigned int len;
1594 struct bnxt_tpa_info *tpa_info;
1595 dma_addr_t mapping;
1596 struct sk_buff *skb;
bfcd8d79 1597 u16 idx = 0, agg_id;
6bb19474 1598 void *data;
bfcd8d79 1599 bool gro;
c0c050c5 1600
fa7e2812 1601 if (unlikely(bnapi->in_reset)) {
e44758b7 1602 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
fa7e2812
MC
1603
1604 if (rc < 0)
1605 return ERR_PTR(-EBUSY);
1606 return NULL;
1607 }
1608
1609 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1610 agg_id = TPA_END_AGG_ID_P5(tpa_end);
ec4d8e7c 1611 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1612 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1613 tpa_info = &rxr->rx_tpa[agg_id];
1614 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1615 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1616 agg_bufs, tpa_info->agg_count);
1617 agg_bufs = tpa_info->agg_count;
1618 }
1619 tpa_info->agg_count = 0;
1620 *event |= BNXT_AGG_EVENT;
ec4d8e7c 1621 bnxt_free_agg_idx(rxr, agg_id);
bfcd8d79
MC
1622 idx = agg_id;
1623 gro = !!(bp->flags & BNXT_FLAG_GRO);
1624 } else {
1625 agg_id = TPA_END_AGG_ID(tpa_end);
1626 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1627 tpa_info = &rxr->rx_tpa[agg_id];
1628 idx = RING_CMP(*raw_cons);
1629 if (agg_bufs) {
1630 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1631 return ERR_PTR(-EBUSY);
1632
1633 *event |= BNXT_AGG_EVENT;
1634 idx = NEXT_CMP(idx);
1635 }
1636 gro = !!TPA_END_GRO(tpa_end);
1637 }
c0c050c5 1638 data = tpa_info->data;
6bb19474
MC
1639 data_ptr = tpa_info->data_ptr;
1640 prefetch(data_ptr);
c0c050c5
MC
1641 len = tpa_info->len;
1642 mapping = tpa_info->mapping;
1643
69c149e2 1644 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
4a228a3a 1645 bnxt_abort_tpa(cpr, idx, agg_bufs);
69c149e2
MC
1646 if (agg_bufs > MAX_SKB_FRAGS)
1647 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1648 agg_bufs, (int)MAX_SKB_FRAGS);
c0c050c5
MC
1649 return NULL;
1650 }
1651
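	/* Small TPA packets are copied into a fresh skb so the existing
	 * buffer can stay in the TPA slot; larger packets hand the buffer
	 * to build_skb() and a replacement buffer is allocated for the slot.
	 */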
1652 if (len <= bp->rx_copy_thresh) {
6bb19474 1653 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
c0c050c5 1654 if (!skb) {
4a228a3a 1655 bnxt_abort_tpa(cpr, idx, agg_bufs);
907fd4a2 1656 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1657 return NULL;
1658 }
1659 } else {
1660 u8 *new_data;
1661 dma_addr_t new_mapping;
1662
1663 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1664 if (!new_data) {
4a228a3a 1665 bnxt_abort_tpa(cpr, idx, agg_bufs);
907fd4a2 1666 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1667 return NULL;
1668 }
1669
1670 tpa_info->data = new_data;
b3dba77c 1671 tpa_info->data_ptr = new_data + bp->rx_offset;
c0c050c5
MC
1672 tpa_info->mapping = new_mapping;
1673
1674 skb = build_skb(data, 0);
c519fe9a
SN
1675 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1676 bp->rx_buf_use_size, bp->rx_dir,
1677 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1678
1679 if (!skb) {
1680 kfree(data);
4a228a3a 1681 bnxt_abort_tpa(cpr, idx, agg_bufs);
907fd4a2 1682 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1683 return NULL;
1684 }
b3dba77c 1685 skb_reserve(skb, bp->rx_offset);
c0c050c5
MC
1686 skb_put(skb, len);
1687 }
1688
1689 if (agg_bufs) {
4a228a3a 1690 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
c0c050c5
MC
1691 if (!skb) {
1692 /* Page reuse already handled by bnxt_rx_pages(). */
907fd4a2 1693 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1694 return NULL;
1695 }
1696 }
ee5c7fb3
SP
1697
1698 skb->protocol =
1699 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
c0c050c5
MC
1700
1701 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1702 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1703
8852ddb4 1704 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
a196e96b 1705 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
96bdd4b9
MC
1706 __be16 vlan_proto = htons(tpa_info->metadata >>
1707 RX_CMP_FLAGS2_METADATA_TPID_SFT);
ed7bc602 1708 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
c0c050c5 1709
96bdd4b9
MC
1710 if (eth_type_vlan(vlan_proto)) {
1711 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1712 } else {
1713 dev_kfree_skb(skb);
1714 return NULL;
1715 }
c0c050c5
MC
1716 }
1717
1718 skb_checksum_none_assert(skb);
1719 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1720 skb->ip_summed = CHECKSUM_UNNECESSARY;
1721 skb->csum_level =
1722 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1723 }
1724
bfcd8d79 1725 if (gro)
309369c9 1726 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
c0c050c5
MC
1727
1728 return skb;
1729}
1730
8fe88ce7
MC
1731static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1732 struct rx_agg_cmp *rx_agg)
1733{
1734 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1735 struct bnxt_tpa_info *tpa_info;
1736
ec4d8e7c 1737 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
8fe88ce7
MC
1738 tpa_info = &rxr->rx_tpa[agg_id];
1739 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1740 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1741}
1742
ee5c7fb3
SP
1743static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1744 struct sk_buff *skb)
1745{
1746 if (skb->dev != bp->dev) {
1747 /* this packet belongs to a vf-rep */
1748 bnxt_vf_rep_rx(bp, skb);
1749 return;
1750 }
1751 skb_record_rx_queue(skb, bnapi->index);
1752 napi_gro_receive(&bnapi->napi, skb);
1753}
1754
c0c050c5
MC
1755/* returns the following:
1756 * 1 - 1 packet successfully received
1757 * 0 - successful TPA_START, packet not completed yet
1758 * -EBUSY - completion ring does not have all the agg buffers yet
1759 * -ENOMEM - packet aborted due to out of memory
1760 * -EIO - packet aborted due to hw error indicated in BD
1761 */
e44758b7
MC
1762static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1763 u32 *raw_cons, u8 *event)
c0c050c5 1764{
e44758b7 1765 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1766 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1767 struct net_device *dev = bp->dev;
1768 struct rx_cmp *rxcmp;
1769 struct rx_cmp_ext *rxcmp1;
1770 u32 tmp_raw_cons = *raw_cons;
ee5c7fb3 1771 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
c0c050c5
MC
1772 struct bnxt_sw_rx_bd *rx_buf;
1773 unsigned int len;
6bb19474 1774 u8 *data_ptr, agg_bufs, cmp_type;
c0c050c5
MC
1775 dma_addr_t dma_addr;
1776 struct sk_buff *skb;
7f5515d1 1777 u32 flags, misc;
6bb19474 1778 void *data;
c0c050c5
MC
1779 int rc = 0;
1780
1781 rxcmp = (struct rx_cmp *)
1782 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1783
8fe88ce7
MC
1784 cmp_type = RX_CMP_TYPE(rxcmp);
1785
1786 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1787 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1788 goto next_rx_no_prod_no_len;
1789 }
1790
c0c050c5
MC
1791 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1792 cp_cons = RING_CMP(tmp_raw_cons);
1793 rxcmp1 = (struct rx_cmp_ext *)
1794 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1795
1796 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1797 return -EBUSY;
1798
828affc2
MC
1799 /* The valid test of the entry must be done first before
1800 * reading any further.
1801 */
1802 dma_rmb();
c0c050c5
MC
1803 prod = rxr->rx_prod;
1804
1805 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1806 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1807 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1808
4e5dbbda 1809 *event |= BNXT_RX_EVENT;
e7e70fa6 1810 goto next_rx_no_prod_no_len;
c0c050c5
MC
1811
1812 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
e44758b7 1813 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
c0c050c5 1814 (struct rx_tpa_end_cmp *)rxcmp,
4e5dbbda 1815 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
c0c050c5 1816
1fac4b2f 1817 if (IS_ERR(skb))
c0c050c5
MC
1818 return -EBUSY;
1819
1820 rc = -ENOMEM;
1821 if (likely(skb)) {
ee5c7fb3 1822 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1823 rc = 1;
1824 }
4e5dbbda 1825 *event |= BNXT_RX_EVENT;
e7e70fa6 1826 goto next_rx_no_prod_no_len;
c0c050c5
MC
1827 }
1828
1829 cons = rxcmp->rx_cmp_opaque;
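	/* The opaque field echoes the buffer index posted in the rx BD; a
	 * mismatch with the expected consumer index means the ring is out
	 * of sync with the hardware and must be reset.
	 */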
fa7e2812 1830 if (unlikely(cons != rxr->rx_next_cons)) {
bbd6f0a9 1831 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
fa7e2812 1832
1b5c8b63
MC
 1833	/* 0xffff is a forced error, don't print it */
1834 if (rxr->rx_next_cons != 0xffff)
1835 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1836 cons, rxr->rx_next_cons);
fa7e2812 1837 bnxt_sched_reset(bp, rxr);
bbd6f0a9
MC
1838 if (rc1)
1839 return rc1;
1840 goto next_rx_no_prod_no_len;
fa7e2812 1841 }
a1b0e4e6
MC
1842 rx_buf = &rxr->rx_buf_ring[cons];
1843 data = rx_buf->data;
1844 data_ptr = rx_buf->data_ptr;
6bb19474 1845 prefetch(data_ptr);
c0c050c5 1846
c61fb99c
MC
1847 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1848 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
c0c050c5
MC
1849
1850 if (agg_bufs) {
1851 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1852 return -EBUSY;
1853
1854 cp_cons = NEXT_CMP(cp_cons);
4e5dbbda 1855 *event |= BNXT_AGG_EVENT;
c0c050c5 1856 }
4e5dbbda 1857 *event |= BNXT_RX_EVENT;
c0c050c5
MC
1858
1859 rx_buf->data = NULL;
1860 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
8e44e96c
MC
1861 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1862
c0c050c5
MC
1863 bnxt_reuse_rx_data(rxr, cons, data);
1864 if (agg_bufs)
4a228a3a
MC
1865 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1866 false);
c0c050c5
MC
1867
1868 rc = -EIO;
8e44e96c 1869 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
9d8b5f05 1870 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
8d4bd96b
MC
1871 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1872 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
8fbf58e1
MC
1873 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1874 rx_err);
19b3751f
MC
1875 bnxt_sched_reset(bp, rxr);
1876 }
8e44e96c 1877 }
0b397b17 1878 goto next_rx_no_len;
c0c050c5
MC
1879 }
1880
7f5515d1
PC
1881 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1882 len = flags >> RX_CMP_LEN_SHIFT;
11cd119d 1883 dma_addr = rx_buf->mapping;
c0c050c5 1884
c6d30e83
MC
1885 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1886 rc = 1;
1887 goto next_rx;
1888 }
1889
c0c050c5 1890 if (len <= bp->rx_copy_thresh) {
6bb19474 1891 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
c0c050c5
MC
1892 bnxt_reuse_rx_data(rxr, cons, data);
1893 if (!skb) {
296d5b54 1894 if (agg_bufs)
4a228a3a
MC
1895 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1896 agg_bufs, false);
907fd4a2 1897 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1898 rc = -ENOMEM;
1899 goto next_rx;
1900 }
1901 } else {
c61fb99c
MC
1902 u32 payload;
1903
c6d30e83
MC
1904 if (rx_buf->data_ptr == data_ptr)
1905 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1906 else
1907 payload = 0;
6bb19474 1908 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
c61fb99c 1909 payload | len);
c0c050c5 1910 if (!skb) {
907fd4a2 1911 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1912 rc = -ENOMEM;
1913 goto next_rx;
1914 }
1915 }
1916
1917 if (agg_bufs) {
4a228a3a 1918 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
c0c050c5 1919 if (!skb) {
907fd4a2 1920 cpr->sw_stats.rx.rx_oom_discards += 1;
c0c050c5
MC
1921 rc = -ENOMEM;
1922 goto next_rx;
1923 }
1924 }
1925
1926 if (RX_CMP_HASH_VALID(rxcmp)) {
1927 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1928 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1929
1930 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1931 if (hash_type != 1 && hash_type != 3)
1932 type = PKT_HASH_TYPE_L3;
1933 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1934 }
1935
ee5c7fb3
SP
1936 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1937 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
c0c050c5 1938
8852ddb4
MC
1939 if ((rxcmp1->rx_cmp_flags2 &
1940 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
a196e96b 1941 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
c0c050c5 1942 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
ed7bc602 1943 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
96bdd4b9
MC
1944 __be16 vlan_proto = htons(meta_data >>
1945 RX_CMP_FLAGS2_METADATA_TPID_SFT);
c0c050c5 1946
96bdd4b9
MC
1947 if (eth_type_vlan(vlan_proto)) {
1948 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1949 } else {
1950 dev_kfree_skb(skb);
1951 goto next_rx;
1952 }
c0c050c5
MC
1953 }
1954
1955 skb_checksum_none_assert(skb);
1956 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1957 if (dev->features & NETIF_F_RXCSUM) {
1958 skb->ip_summed = CHECKSUM_UNNECESSARY;
1959 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1960 }
1961 } else {
665e350d
SB
1962 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1963 if (dev->features & NETIF_F_RXCSUM)
9d8b5f05 1964 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
665e350d 1965 }
c0c050c5
MC
1966 }
1967
7f5515d1
PC
1968 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1969 RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1970 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1971 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1972 u64 ns, ts;
1973
1974 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1975 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1976
1977 spin_lock_bh(&ptp->ptp_lock);
1978 ns = timecounter_cyc2time(&ptp->tc, ts);
1979 spin_unlock_bh(&ptp->ptp_lock);
1980 memset(skb_hwtstamps(skb), 0,
1981 sizeof(*skb_hwtstamps(skb)));
1982 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1983 }
1984 }
1985 }
ee5c7fb3 1986 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1987 rc = 1;
1988
1989next_rx:
6a8788f2
AG
1990 cpr->rx_packets += 1;
1991 cpr->rx_bytes += len;
e7e70fa6 1992
0b397b17
MC
1993next_rx_no_len:
1994 rxr->rx_prod = NEXT_RX(prod);
1995 rxr->rx_next_cons = NEXT_RX(cons);
1996
e7e70fa6 1997next_rx_no_prod_no_len:
c0c050c5
MC
1998 *raw_cons = tmp_raw_cons;
1999
2000 return rc;
2001}
2002
2270bc5d
MC
2003/* In netpoll mode, if we are using a combined completion ring, we need to
2004 * discard the rx packets and recycle the buffers.
2005 */
e44758b7
MC
2006static int bnxt_force_rx_discard(struct bnxt *bp,
2007 struct bnxt_cp_ring_info *cpr,
2270bc5d
MC
2008 u32 *raw_cons, u8 *event)
2009{
2270bc5d
MC
2010 u32 tmp_raw_cons = *raw_cons;
2011 struct rx_cmp_ext *rxcmp1;
2012 struct rx_cmp *rxcmp;
2013 u16 cp_cons;
2014 u8 cmp_type;
40bedf7c 2015 int rc;
2270bc5d
MC
2016
2017 cp_cons = RING_CMP(tmp_raw_cons);
2018 rxcmp = (struct rx_cmp *)
2019 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2020
2021 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2022 cp_cons = RING_CMP(tmp_raw_cons);
2023 rxcmp1 = (struct rx_cmp_ext *)
2024 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2025
2026 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2027 return -EBUSY;
2028
828affc2
MC
2029 /* The valid test of the entry must be done first before
2030 * reading any further.
2031 */
2032 dma_rmb();
2270bc5d
MC
2033 cmp_type = RX_CMP_TYPE(rxcmp);
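	/* Flag the completion as an error so that bnxt_rx_pkt() below
	 * drops the packet and recycles the rx buffers instead of building
	 * an skb.
	 */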
2034 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2035 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2036 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2037 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2038 struct rx_tpa_end_cmp_ext *tpa_end1;
2039
2040 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2041 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2042 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2043 }
40bedf7c
JK
2044 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2045 if (rc && rc != -EBUSY)
2046 cpr->sw_stats.rx.rx_netpoll_discards += 1;
2047 return rc;
2270bc5d
MC
2048}
2049
7e914027
MC
2050u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2051{
2052 struct bnxt_fw_health *fw_health = bp->fw_health;
2053 u32 reg = fw_health->regs[reg_idx];
2054 u32 reg_type, reg_off, val = 0;
2055
2056 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2057 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2058 switch (reg_type) {
2059 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2060 pci_read_config_dword(bp->pdev, reg_off, &val);
2061 break;
2062 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2063 reg_off = fw_health->mapped_regs[reg_idx];
df561f66 2064 fallthrough;
7e914027
MC
2065 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2066 val = readl(bp->bar0 + reg_off);
2067 break;
2068 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2069 val = readl(bp->bar1 + reg_off);
2070 break;
2071 }
2072 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2073 val &= fw_health->fw_reset_inprog_reg_mask;
2074 return val;
2075}
2076
8d4bd96b
MC
2077static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2078{
2079 int i;
2080
2081 for (i = 0; i < bp->rx_nr_rings; i++) {
2082 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2083 struct bnxt_ring_grp_info *grp_info;
2084
2085 grp_info = &bp->grp_info[grp_idx];
2086 if (grp_info->agg_fw_ring_id == ring_id)
2087 return grp_idx;
2088 }
2089 return INVALID_HW_RING_ID;
2090}
2091
abf90ac2
PC
2092static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2093{
2094 switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
2095 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2096 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2097 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2098 break;
2099 default:
2100 netdev_err(bp->dev, "FW reported unknown error type\n");
2101 break;
2102 }
2103}
2104
4bb13abf 2105#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
2106 ((data) & \
2107 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 2108
8d4bd96b
MC
2109#define BNXT_EVENT_RING_TYPE(data2) \
2110 ((data2) & \
2111 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2112
2113#define BNXT_EVENT_RING_TYPE_RX(data2) \
2114 (BNXT_EVENT_RING_TYPE(data2) == \
2115 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2116
c0c050c5
MC
2117static int bnxt_async_event_process(struct bnxt *bp,
2118 struct hwrm_async_event_cmpl *cmpl)
2119{
2120 u16 event_id = le16_to_cpu(cmpl->event_id);
03ab8ca1
MC
2121 u32 data1 = le32_to_cpu(cmpl->event_data1);
2122 u32 data2 = le32_to_cpu(cmpl->event_data2);
c0c050c5
MC
2123
 2124	/* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2125 switch (event_id) {
87c374de 2126 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
8cbde117
MC
2127 struct bnxt_link_info *link_info = &bp->link_info;
2128
2129 if (BNXT_VF(bp))
2130 goto async_event_process_exit;
a8168b6c
MC
2131
2132 /* print unsupported speed warning in forced speed mode only */
2133 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2134 (data1 & 0x20000)) {
8cbde117
MC
2135 u16 fw_speed = link_info->force_link_speed;
2136 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2137
a8168b6c
MC
2138 if (speed != SPEED_UNKNOWN)
2139 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2140 speed);
8cbde117 2141 }
286ef9d6 2142 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
8cbde117 2143 }
df561f66 2144 fallthrough;
b1613e78
MC
2145 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2146 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2147 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
df561f66 2148 fallthrough;
87c374de 2149 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
c0c050c5 2150 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
19241368 2151 break;
87c374de 2152 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
19241368 2153 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
c0c050c5 2154 break;
87c374de 2155 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
4bb13abf
MC
2156 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2157
2158 if (BNXT_VF(bp))
2159 break;
2160
2161 if (bp->pf.port_id != port_id)
2162 break;
2163
4bb13abf
MC
2164 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2165 break;
2166 }
87c374de 2167 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
fc0f1929
MC
2168 if (BNXT_PF(bp))
2169 goto async_event_process_exit;
2170 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2171 break;
5863b10a
MC
2172 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2173 char *fatal_str = "non-fatal";
2174
8280b38e
VV
2175 if (!bp->fw_health)
2176 goto async_event_process_exit;
2177
2151fe08
MC
2178 bp->fw_reset_timestamp = jiffies;
2179 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2180 if (!bp->fw_reset_min_dsecs)
2181 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2182 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2183 if (!bp->fw_reset_max_dsecs)
2184 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
acfb50e4 2185 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
5863b10a 2186 fatal_str = "fatal";
acfb50e4 2187 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
5863b10a 2188 }
871127e6
MC
2189 netif_warn(bp, hw, bp->dev,
2190 "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2191 fatal_str, data1, data2,
2192 bp->fw_reset_min_dsecs * 100,
2193 bp->fw_reset_max_dsecs * 100);
2151fe08
MC
2194 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2195 break;
5863b10a 2196 }
7e914027
MC
2197 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2198 struct bnxt_fw_health *fw_health = bp->fw_health;
7e914027
MC
2199
2200 if (!fw_health)
2201 goto async_event_process_exit;
2202
2203 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2204 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
f4d95c3c
MC
2205 if (!fw_health->enabled) {
2206 netif_info(bp, drv, bp->dev,
2207 "Error recovery info: error recovery[0]\n");
7e914027 2208 break;
f4d95c3c 2209 }
7e914027
MC
2210 fw_health->tmr_multiplier =
2211 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2212 bp->current_interval * 10);
2213 fw_health->tmr_counter = fw_health->tmr_multiplier;
2214 fw_health->last_fw_heartbeat =
2215 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2216 fw_health->last_fw_reset_cnt =
2217 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
f4d95c3c
MC
2218 netif_info(bp, drv, bp->dev,
2219 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2220 fw_health->master, fw_health->last_fw_reset_cnt,
2221 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
7e914027
MC
2222 goto async_event_process_exit;
2223 }
a44daa8f 2224 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
871127e6
MC
2225 netif_notice(bp, hw, bp->dev,
2226 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2227 data1, data2);
a44daa8f 2228 goto async_event_process_exit;
8d4bd96b 2229 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
8d4bd96b
MC
2230 struct bnxt_rx_ring_info *rxr;
2231 u16 grp_idx;
2232
2233 if (bp->flags & BNXT_FLAG_CHIP_P5)
2234 goto async_event_process_exit;
2235
2236 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2237 BNXT_EVENT_RING_TYPE(data2), data1);
2238 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2239 goto async_event_process_exit;
2240
2241 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2242 if (grp_idx == INVALID_HW_RING_ID) {
2243 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2244 data1);
2245 goto async_event_process_exit;
2246 }
2247 rxr = bp->bnapi[grp_idx]->rx_ring;
2248 bnxt_sched_reset(bp, rxr);
2249 goto async_event_process_exit;
2250 }
df97b34d
MC
2251 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2252 struct bnxt_fw_health *fw_health = bp->fw_health;
2253
2254 netif_notice(bp, hw, bp->dev,
2255 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2256 data1, data2);
2257 if (fw_health) {
2258 fw_health->echo_req_data1 = data1;
2259 fw_health->echo_req_data2 = data2;
2260 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2261 break;
2262 }
2263 goto async_event_process_exit;
2264 }
099fdeda
PC
2265 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2266 bnxt_ptp_pps_event(bp, data1, data2);
abf90ac2
PC
2267 goto async_event_process_exit;
2268 }
2269 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2270 bnxt_event_error_report(bp, data1, data2);
099fdeda
PC
2271 goto async_event_process_exit;
2272 }
68f684e2
EP
2273 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2274 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2275
2276 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2277 goto async_event_process_exit;
2278 }
c0c050c5 2279 default:
19241368 2280 goto async_event_process_exit;
c0c050c5 2281 }
c213eae8 2282 bnxt_queue_sp_work(bp);
19241368 2283async_event_process_exit:
a588e458 2284 bnxt_ulp_async_events(bp, cmpl);
c0c050c5
MC
2285 return 0;
2286}
2287
2288static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2289{
2290 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2291 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2292 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2293 (struct hwrm_fwd_req_cmpl *)txcmp;
2294
2295 switch (cmpl_type) {
2296 case CMPL_BASE_TYPE_HWRM_DONE:
2297 seq_id = le16_to_cpu(h_cmpl->sequence_id);
68f684e2 2298 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
c0c050c5
MC
2299 break;
2300
2301 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2302 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2303
2304 if ((vf_id < bp->pf.first_vf_id) ||
2305 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2306 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2307 vf_id);
2308 return -EINVAL;
2309 }
2310
2311 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2312 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
c213eae8 2313 bnxt_queue_sp_work(bp);
c0c050c5
MC
2314 break;
2315
2316 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2317 bnxt_async_event_process(bp,
2318 (struct hwrm_async_event_cmpl *)txcmp);
cc9fd180 2319 break;
c0c050c5
MC
2320
2321 default:
2322 break;
2323 }
2324
2325 return 0;
2326}
2327
2328static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2329{
2330 struct bnxt_napi *bnapi = dev_instance;
2331 struct bnxt *bp = bnapi->bp;
2332 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2333 u32 cons = RING_CMP(cpr->cp_raw_cons);
2334
6a8788f2 2335 cpr->event_ctr++;
c0c050c5
MC
2336 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2337 napi_schedule(&bnapi->napi);
2338 return IRQ_HANDLED;
2339}
2340
2341static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2342{
2343 u32 raw_cons = cpr->cp_raw_cons;
2344 u16 cons = RING_CMP(raw_cons);
2345 struct tx_cmp *txcmp;
2346
2347 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2348
2349 return TX_CMP_VALID(txcmp, raw_cons);
2350}
2351
c0c050c5
MC
2352static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2353{
2354 struct bnxt_napi *bnapi = dev_instance;
2355 struct bnxt *bp = bnapi->bp;
2356 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2357 u32 cons = RING_CMP(cpr->cp_raw_cons);
2358 u32 int_status;
2359
2360 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2361
2362 if (!bnxt_has_work(bp, cpr)) {
11809490 2363 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
c0c050c5
MC
2364 /* return if erroneous interrupt */
2365 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2366 return IRQ_NONE;
2367 }
2368
2369 /* disable ring IRQ */
697197e5 2370 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
c0c050c5
MC
2371
2372 /* Return here if interrupt is shared and is disabled. */
2373 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2374 return IRQ_HANDLED;
2375
2376 napi_schedule(&bnapi->napi);
2377 return IRQ_HANDLED;
2378}
2379
3675b92f
MC
2380static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2381 int budget)
c0c050c5 2382{
e44758b7 2383 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5
MC
2384 u32 raw_cons = cpr->cp_raw_cons;
2385 u32 cons;
2386 int tx_pkts = 0;
2387 int rx_pkts = 0;
4e5dbbda 2388 u8 event = 0;
c0c050c5
MC
2389 struct tx_cmp *txcmp;
2390
0fcec985 2391 cpr->has_more_work = 0;
340ac85e 2392 cpr->had_work_done = 1;
c0c050c5
MC
2393 while (1) {
2394 int rc;
2395
2396 cons = RING_CMP(raw_cons);
2397 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2398
2399 if (!TX_CMP_VALID(txcmp, raw_cons))
2400 break;
2401
67a95e20
MC
2402 /* The valid test of the entry must be done first before
2403 * reading any further.
2404 */
b67daab0 2405 dma_rmb();
c0c050c5
MC
2406 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2407 tx_pkts++;
2408 /* return full budget so NAPI will complete. */
73f21c65 2409 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
c0c050c5 2410 rx_pkts = budget;
73f21c65 2411 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985
MC
2412 if (budget)
2413 cpr->has_more_work = 1;
73f21c65
MC
2414 break;
2415 }
c0c050c5 2416 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2270bc5d 2417 if (likely(budget))
e44758b7 2418 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2270bc5d 2419 else
e44758b7 2420 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2270bc5d 2421 &event);
c0c050c5
MC
2422 if (likely(rc >= 0))
2423 rx_pkts += rc;
903649e7
MC
2424 /* Increment rx_pkts when rc is -ENOMEM to count towards
2425 * the NAPI budget. Otherwise, we may potentially loop
2426 * here forever if we consistently cannot allocate
2427 * buffers.
2428 */
2edbdb31 2429 else if (rc == -ENOMEM && budget)
903649e7 2430 rx_pkts++;
c0c050c5
MC
2431 else if (rc == -EBUSY) /* partial completion */
2432 break;
c0c050c5
MC
2433 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2434 CMPL_BASE_TYPE_HWRM_DONE) ||
2435 (TX_CMP_TYPE(txcmp) ==
2436 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2437 (TX_CMP_TYPE(txcmp) ==
2438 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2439 bnxt_hwrm_handler(bp, txcmp);
2440 }
2441 raw_cons = NEXT_RAW_CMP(raw_cons);
2442
0fcec985
MC
2443 if (rx_pkts && rx_pkts == budget) {
2444 cpr->has_more_work = 1;
c0c050c5 2445 break;
0fcec985 2446 }
c0c050c5
MC
2447 }
2448
f18c2b77
AG
2449 if (event & BNXT_REDIRECT_EVENT)
2450 xdp_do_flush_map();
2451
38413406
MC
2452 if (event & BNXT_TX_EVENT) {
2453 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
38413406
MC
2454 u16 prod = txr->tx_prod;
2455
2456 /* Sync BD data before updating doorbell */
2457 wmb();
2458
697197e5 2459 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
38413406
MC
2460 }
2461
c0c050c5 2462 cpr->cp_raw_cons = raw_cons;
3675b92f
MC
2463 bnapi->tx_pkts += tx_pkts;
2464 bnapi->events |= event;
2465 return rx_pkts;
2466}
c0c050c5 2467
3675b92f
MC
2468static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2469{
2470 if (bnapi->tx_pkts) {
2471 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2472 bnapi->tx_pkts = 0;
2473 }
c0c050c5 2474
8fbf58e1 2475 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
b6ab4b01 2476 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 2477
3675b92f 2478 if (bnapi->events & BNXT_AGG_EVENT)
697197e5 2479 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 2480 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
c0c050c5 2481 }
3675b92f
MC
2482 bnapi->events = 0;
2483}
2484
2485static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2486 int budget)
2487{
2488 struct bnxt_napi *bnapi = cpr->bnapi;
2489 int rx_pkts;
2490
2491 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2492
2493 /* ACK completion ring before freeing tx ring and producing new
2494 * buffers in rx/agg rings to prevent overflowing the completion
2495 * ring.
2496 */
2497 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2498
2499 __bnxt_poll_work_done(bp, bnapi);
c0c050c5
MC
2500 return rx_pkts;
2501}
2502
10bbdaf5
PS
2503static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2504{
2505 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2506 struct bnxt *bp = bnapi->bp;
2507 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2508 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2509 struct tx_cmp *txcmp;
2510 struct rx_cmp_ext *rxcmp1;
2511 u32 cp_cons, tmp_raw_cons;
2512 u32 raw_cons = cpr->cp_raw_cons;
2513 u32 rx_pkts = 0;
4e5dbbda 2514 u8 event = 0;
10bbdaf5
PS
2515
2516 while (1) {
2517 int rc;
2518
2519 cp_cons = RING_CMP(raw_cons);
2520 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2521
2522 if (!TX_CMP_VALID(txcmp, raw_cons))
2523 break;
2524
828affc2
MC
2525 /* The valid test of the entry must be done first before
2526 * reading any further.
2527 */
2528 dma_rmb();
10bbdaf5
PS
2529 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2530 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2531 cp_cons = RING_CMP(tmp_raw_cons);
2532 rxcmp1 = (struct rx_cmp_ext *)
2533 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2534
2535 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2536 break;
2537
2538 /* force an error to recycle the buffer */
2539 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2540 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2541
e44758b7 2542 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2edbdb31 2543 if (likely(rc == -EIO) && budget)
10bbdaf5
PS
2544 rx_pkts++;
2545 else if (rc == -EBUSY) /* partial completion */
2546 break;
2547 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2548 CMPL_BASE_TYPE_HWRM_DONE)) {
2549 bnxt_hwrm_handler(bp, txcmp);
2550 } else {
2551 netdev_err(bp->dev,
2552 "Invalid completion received on special ring\n");
2553 }
2554 raw_cons = NEXT_RAW_CMP(raw_cons);
2555
2556 if (rx_pkts == budget)
2557 break;
2558 }
2559
2560 cpr->cp_raw_cons = raw_cons;
697197e5
MC
2561 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2562 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10bbdaf5 2563
434c975a 2564 if (event & BNXT_AGG_EVENT)
697197e5 2565 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10bbdaf5
PS
2566
2567 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
6ad20165 2568 napi_complete_done(napi, rx_pkts);
697197e5 2569 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
10bbdaf5
PS
2570 }
2571 return rx_pkts;
2572}
2573
c0c050c5
MC
2574static int bnxt_poll(struct napi_struct *napi, int budget)
2575{
2576 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2577 struct bnxt *bp = bnapi->bp;
2578 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2579 int work_done = 0;
2580
0da65f49
MC
2581 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2582 napi_complete(napi);
2583 return 0;
2584 }
c0c050c5 2585 while (1) {
e44758b7 2586 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
c0c050c5 2587
73f21c65
MC
2588 if (work_done >= budget) {
2589 if (!budget)
697197e5 2590 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5 2591 break;
73f21c65 2592 }
c0c050c5
MC
2593
2594 if (!bnxt_has_work(bp, cpr)) {
e7b95691 2595 if (napi_complete_done(napi, work_done))
697197e5 2596 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5
MC
2597 break;
2598 }
2599 }
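	/* Feed this poll's packet and byte counts to the DIM algorithm so
	 * interrupt coalescing can adapt to the traffic rate.
	 */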
6a8788f2 2600 if (bp->flags & BNXT_FLAG_DIM) {
f06d0ca4 2601 struct dim_sample dim_sample = {};
6a8788f2 2602
8960b389
TG
2603 dim_update_sample(cpr->event_ctr,
2604 cpr->rx_packets,
2605 cpr->rx_bytes,
2606 &dim_sample);
6a8788f2
AG
2607 net_dim(&cpr->dim, dim_sample);
2608 }
c0c050c5
MC
2609 return work_done;
2610}
2611
0fcec985
MC
2612static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2613{
2614 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2615 int i, work_done = 0;
2616
2617 for (i = 0; i < 2; i++) {
2618 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2619
2620 if (cpr2) {
2621 work_done += __bnxt_poll_work(bp, cpr2,
2622 budget - work_done);
2623 cpr->has_more_work |= cpr2->has_more_work;
2624 }
2625 }
2626 return work_done;
2627}
2628
2629static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
340ac85e 2630 u64 dbr_type)
0fcec985
MC
2631{
2632 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2633 int i;
2634
2635 for (i = 0; i < 2; i++) {
2636 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2637 struct bnxt_db_info *db;
2638
340ac85e 2639 if (cpr2 && cpr2->had_work_done) {
0fcec985
MC
2640 db = &cpr2->cp_db;
2641 writeq(db->db_key64 | dbr_type |
2642 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2643 cpr2->had_work_done = 0;
2644 }
2645 }
2646 __bnxt_poll_work_done(bp, bnapi);
2647}
2648
2649static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2650{
2651 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2652 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2653 u32 raw_cons = cpr->cp_raw_cons;
2654 struct bnxt *bp = bnapi->bp;
2655 struct nqe_cn *nqcmp;
2656 int work_done = 0;
2657 u32 cons;
2658
0da65f49
MC
2659 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2660 napi_complete(napi);
2661 return 0;
2662 }
0fcec985
MC
2663 if (cpr->has_more_work) {
2664 cpr->has_more_work = 0;
2665 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
0fcec985
MC
2666 }
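	/* On P5 chips this NAPI instance services a notification queue
	 * (NQ); each NQ entry points at a completion ring (cq_handle_low)
	 * which is then polled for rx/tx work.
	 */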
2667 while (1) {
2668 cons = RING_CMP(raw_cons);
2669 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2670
2671 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
54a9062f
MC
2672 if (cpr->has_more_work)
2673 break;
2674
340ac85e 2675 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
0fcec985
MC
2676 cpr->cp_raw_cons = raw_cons;
2677 if (napi_complete_done(napi, work_done))
2678 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2679 cpr->cp_raw_cons);
2680 return work_done;
2681 }
2682
2683 /* The valid test of the entry must be done first before
2684 * reading any further.
2685 */
2686 dma_rmb();
2687
2688 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2689 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2690 struct bnxt_cp_ring_info *cpr2;
2691
2692 cpr2 = cpr->cp_ring_arr[idx];
2693 work_done += __bnxt_poll_work(bp, cpr2,
2694 budget - work_done);
54a9062f 2695 cpr->has_more_work |= cpr2->has_more_work;
0fcec985
MC
2696 } else {
2697 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2698 }
2699 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985 2700 }
340ac85e 2701 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
389a877a
MC
2702 if (raw_cons != cpr->cp_raw_cons) {
2703 cpr->cp_raw_cons = raw_cons;
2704 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2705 }
0fcec985
MC
2706 return work_done;
2707}
2708
c0c050c5
MC
2709static void bnxt_free_tx_skbs(struct bnxt *bp)
2710{
2711 int i, max_idx;
2712 struct pci_dev *pdev = bp->pdev;
2713
b6ab4b01 2714 if (!bp->tx_ring)
c0c050c5
MC
2715 return;
2716
2717 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2718 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2719 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2720 int j;
2721
c0c050c5
MC
2722 for (j = 0; j < max_idx;) {
2723 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
f18c2b77 2724 struct sk_buff *skb;
c0c050c5
MC
2725 int k, last;
2726
f18c2b77
AG
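			/* XDP_REDIRECT frames carry an xdp_frame instead of an
			 * skb and must be returned through the XDP APIs.
			 */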
2727 if (i < bp->tx_nr_rings_xdp &&
2728 tx_buf->action == XDP_REDIRECT) {
2729 dma_unmap_single(&pdev->dev,
2730 dma_unmap_addr(tx_buf, mapping),
2731 dma_unmap_len(tx_buf, len),
df70303d 2732 DMA_TO_DEVICE);
f18c2b77
AG
2733 xdp_return_frame(tx_buf->xdpf);
2734 tx_buf->action = 0;
2735 tx_buf->xdpf = NULL;
2736 j++;
2737 continue;
2738 }
2739
2740 skb = tx_buf->skb;
c0c050c5
MC
2741 if (!skb) {
2742 j++;
2743 continue;
2744 }
2745
2746 tx_buf->skb = NULL;
2747
2748 if (tx_buf->is_push) {
2749 dev_kfree_skb(skb);
2750 j += 2;
2751 continue;
2752 }
2753
2754 dma_unmap_single(&pdev->dev,
2755 dma_unmap_addr(tx_buf, mapping),
2756 skb_headlen(skb),
df70303d 2757 DMA_TO_DEVICE);
c0c050c5
MC
2758
2759 last = tx_buf->nr_frags;
2760 j += 2;
d612a579
MC
2761 for (k = 0; k < last; k++, j++) {
2762 int ring_idx = j & bp->tx_ring_mask;
c0c050c5
MC
2763 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2764
d612a579 2765 tx_buf = &txr->tx_buf_ring[ring_idx];
c0c050c5
MC
2766 dma_unmap_page(
2767 &pdev->dev,
2768 dma_unmap_addr(tx_buf, mapping),
df70303d 2769 skb_frag_size(frag), DMA_TO_DEVICE);
c0c050c5
MC
2770 }
2771 dev_kfree_skb(skb);
2772 }
2773 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2774 }
2775}
2776
975bc99a 2777static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
c0c050c5 2778{
975bc99a 2779 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
c0c050c5 2780 struct pci_dev *pdev = bp->pdev;
975bc99a
MC
2781 struct bnxt_tpa_idx_map *map;
2782 int i, max_idx, max_agg_idx;
c0c050c5
MC
2783
2784 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2785 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
975bc99a
MC
2786 if (!rxr->rx_tpa)
2787 goto skip_rx_tpa_free;
c0c050c5 2788
975bc99a
MC
2789 for (i = 0; i < bp->max_tpa; i++) {
2790 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2791 u8 *data = tpa_info->data;
c0c050c5 2792
975bc99a
MC
2793 if (!data)
2794 continue;
c0c050c5 2795
975bc99a
MC
2796 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2797 bp->rx_buf_use_size, bp->rx_dir,
2798 DMA_ATTR_WEAK_ORDERING);
c0c050c5 2799
975bc99a 2800 tpa_info->data = NULL;
c0c050c5 2801
975bc99a
MC
2802 kfree(data);
2803 }
c0c050c5 2804
975bc99a
MC
2805skip_rx_tpa_free:
2806 for (i = 0; i < max_idx; i++) {
2807 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2808 dma_addr_t mapping = rx_buf->mapping;
2809 void *data = rx_buf->data;
c0c050c5 2810
975bc99a
MC
2811 if (!data)
2812 continue;
c0c050c5 2813
975bc99a
MC
2814 rx_buf->data = NULL;
2815 if (BNXT_RX_PAGE_MODE(bp)) {
2816 mapping -= bp->rx_dma_offset;
2817 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2818 bp->rx_dir,
2819 DMA_ATTR_WEAK_ORDERING);
2820 page_pool_recycle_direct(rxr->page_pool, data);
2821 } else {
2822 dma_unmap_single_attrs(&pdev->dev, mapping,
2823 bp->rx_buf_use_size, bp->rx_dir,
2824 DMA_ATTR_WEAK_ORDERING);
2825 kfree(data);
c0c050c5 2826 }
975bc99a
MC
2827 }
2828 for (i = 0; i < max_agg_idx; i++) {
2829 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2830 struct page *page = rx_agg_buf->page;
c0c050c5 2831
975bc99a
MC
2832 if (!page)
2833 continue;
c0c050c5 2834
975bc99a 2835 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
df70303d 2836 BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
975bc99a 2837 DMA_ATTR_WEAK_ORDERING);
c0c050c5 2838
975bc99a
MC
2839 rx_agg_buf->page = NULL;
2840 __clear_bit(i, rxr->rx_agg_bmap);
c0c050c5 2841
975bc99a
MC
2842 __free_page(page);
2843 }
2844 if (rxr->rx_page) {
2845 __free_page(rxr->rx_page);
2846 rxr->rx_page = NULL;
c0c050c5 2847 }
975bc99a
MC
2848 map = rxr->rx_tpa_idx_map;
2849 if (map)
2850 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2851}
2852
2853static void bnxt_free_rx_skbs(struct bnxt *bp)
2854{
2855 int i;
2856
2857 if (!bp->rx_ring)
2858 return;
2859
2860 for (i = 0; i < bp->rx_nr_rings; i++)
2861 bnxt_free_one_rx_ring_skbs(bp, i);
c0c050c5
MC
2862}
2863
2864static void bnxt_free_skbs(struct bnxt *bp)
2865{
2866 bnxt_free_tx_skbs(bp);
2867 bnxt_free_rx_skbs(bp);
2868}
2869
41435c39
MC
2870static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2871{
2872 u8 init_val = mem_init->init_val;
2873 u16 offset = mem_init->offset;
2874 u8 *p2 = p;
2875 int i;
2876
2877 if (!init_val)
2878 return;
2879 if (offset == BNXT_MEM_INVALID_OFFSET) {
2880 memset(p, init_val, len);
2881 return;
2882 }
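	/* Otherwise initialize only the byte at 'offset' within each
	 * 'size'-byte stride of the block.
	 */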
2883 for (i = 0; i < len; i += mem_init->size)
2884 *(p2 + i + offset) = init_val;
2885}
2886
6fe19886 2887static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5
MC
2888{
2889 struct pci_dev *pdev = bp->pdev;
2890 int i;
2891
6fe19886
MC
2892 for (i = 0; i < rmem->nr_pages; i++) {
2893 if (!rmem->pg_arr[i])
c0c050c5
MC
2894 continue;
2895
6fe19886
MC
2896 dma_free_coherent(&pdev->dev, rmem->page_size,
2897 rmem->pg_arr[i], rmem->dma_arr[i]);
c0c050c5 2898
6fe19886 2899 rmem->pg_arr[i] = NULL;
c0c050c5 2900 }
6fe19886 2901 if (rmem->pg_tbl) {
4f49b2b8
MC
2902 size_t pg_tbl_size = rmem->nr_pages * 8;
2903
2904 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2905 pg_tbl_size = rmem->page_size;
2906 dma_free_coherent(&pdev->dev, pg_tbl_size,
6fe19886
MC
2907 rmem->pg_tbl, rmem->pg_tbl_map);
2908 rmem->pg_tbl = NULL;
c0c050c5 2909 }
6fe19886
MC
2910 if (rmem->vmem_size && *rmem->vmem) {
2911 vfree(*rmem->vmem);
2912 *rmem->vmem = NULL;
c0c050c5
MC
2913 }
2914}
2915
6fe19886 2916static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5 2917{
c0c050c5 2918 struct pci_dev *pdev = bp->pdev;
66cca20a 2919 u64 valid_bit = 0;
6fe19886 2920 int i;
c0c050c5 2921
66cca20a
MC
2922 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2923 valid_bit = PTU_PTE_VALID;
4f49b2b8
MC
2924 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2925 size_t pg_tbl_size = rmem->nr_pages * 8;
2926
2927 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2928 pg_tbl_size = rmem->page_size;
2929 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
6fe19886 2930 &rmem->pg_tbl_map,
c0c050c5 2931 GFP_KERNEL);
6fe19886 2932 if (!rmem->pg_tbl)
c0c050c5
MC
2933 return -ENOMEM;
2934 }
2935
6fe19886 2936 for (i = 0; i < rmem->nr_pages; i++) {
66cca20a
MC
2937 u64 extra_bits = valid_bit;
2938
6fe19886
MC
2939 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2940 rmem->page_size,
2941 &rmem->dma_arr[i],
c0c050c5 2942 GFP_KERNEL);
6fe19886 2943 if (!rmem->pg_arr[i])
c0c050c5
MC
2944 return -ENOMEM;
2945
41435c39
MC
2946 if (rmem->mem_init)
2947 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2948 rmem->page_size);
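		/* For multi-page or multi-level rings, publish this page's DMA
		 * address in the page table, tagging the next-to-last and last
		 * entries of ring-type memory.
		 */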
4f49b2b8 2949 if (rmem->nr_pages > 1 || rmem->depth > 0) {
66cca20a
MC
2950 if (i == rmem->nr_pages - 2 &&
2951 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2952 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2953 else if (i == rmem->nr_pages - 1 &&
2954 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2955 extra_bits |= PTU_PTE_LAST;
2956 rmem->pg_tbl[i] =
2957 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2958 }
c0c050c5
MC
2959 }
2960
6fe19886
MC
2961 if (rmem->vmem_size) {
2962 *rmem->vmem = vzalloc(rmem->vmem_size);
2963 if (!(*rmem->vmem))
c0c050c5
MC
2964 return -ENOMEM;
2965 }
2966 return 0;
2967}
2968
4a228a3a
MC
2969static void bnxt_free_tpa_info(struct bnxt *bp)
2970{
2971 int i;
2972
2973 for (i = 0; i < bp->rx_nr_rings; i++) {
2974 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2975
ec4d8e7c
MC
2976 kfree(rxr->rx_tpa_idx_map);
2977 rxr->rx_tpa_idx_map = NULL;
79632e9b
MC
2978 if (rxr->rx_tpa) {
2979 kfree(rxr->rx_tpa[0].agg_arr);
2980 rxr->rx_tpa[0].agg_arr = NULL;
2981 }
4a228a3a
MC
2982 kfree(rxr->rx_tpa);
2983 rxr->rx_tpa = NULL;
2984 }
2985}
2986
2987static int bnxt_alloc_tpa_info(struct bnxt *bp)
2988{
79632e9b
MC
2989 int i, j, total_aggs = 0;
2990
2991 bp->max_tpa = MAX_TPA;
2992 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2993 if (!bp->max_tpa_v2)
2994 return 0;
2995 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2996 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2997 }
4a228a3a
MC
2998
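	/* On P5 chips one aggregation completion array is allocated per rx
	 * ring and carved into per-TPA-slot chunks of MAX_SKB_FRAGS entries.
	 */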
2999 for (i = 0; i < bp->rx_nr_rings; i++) {
3000 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
79632e9b 3001 struct rx_agg_cmp *agg;
4a228a3a 3002
79632e9b 3003 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
4a228a3a
MC
3004 GFP_KERNEL);
3005 if (!rxr->rx_tpa)
3006 return -ENOMEM;
79632e9b
MC
3007
3008 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3009 continue;
3010 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3011 rxr->rx_tpa[0].agg_arr = agg;
3012 if (!agg)
3013 return -ENOMEM;
3014 for (j = 1; j < bp->max_tpa; j++)
3015 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
ec4d8e7c
MC
3016 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3017 GFP_KERNEL);
3018 if (!rxr->rx_tpa_idx_map)
3019 return -ENOMEM;
4a228a3a
MC
3020 }
3021 return 0;
3022}
3023
c0c050c5
MC
3024static void bnxt_free_rx_rings(struct bnxt *bp)
3025{
3026 int i;
3027
b6ab4b01 3028 if (!bp->rx_ring)
c0c050c5
MC
3029 return;
3030
4a228a3a 3031 bnxt_free_tpa_info(bp);
c0c050c5 3032 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3033 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
3034 struct bnxt_ring_struct *ring;
3035
c6d30e83
MC
3036 if (rxr->xdp_prog)
3037 bpf_prog_put(rxr->xdp_prog);
3038
96a8604f
JDB
3039 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3040 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3041
12479f62 3042 page_pool_destroy(rxr->page_pool);
322b87ca
AG
3043 rxr->page_pool = NULL;
3044
c0c050c5
MC
3045 kfree(rxr->rx_agg_bmap);
3046 rxr->rx_agg_bmap = NULL;
3047
3048 ring = &rxr->rx_ring_struct;
6fe19886 3049 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3050
3051 ring = &rxr->rx_agg_ring_struct;
6fe19886 3052 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3053 }
3054}
3055
322b87ca
AG
3056static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3057 struct bnxt_rx_ring_info *rxr)
3058{
3059 struct page_pool_params pp = { 0 };
3060
3061 pp.pool_size = bp->rx_ring_size;
3062 pp.nid = dev_to_node(&bp->pdev->dev);
3063 pp.dev = &bp->pdev->dev;
3064 pp.dma_dir = DMA_BIDIRECTIONAL;
3065
3066 rxr->page_pool = page_pool_create(&pp);
3067 if (IS_ERR(rxr->page_pool)) {
3068 int err = PTR_ERR(rxr->page_pool);
3069
3070 rxr->page_pool = NULL;
3071 return err;
3072 }
3073 return 0;
3074}
3075
c0c050c5
MC
3076static int bnxt_alloc_rx_rings(struct bnxt *bp)
3077{
4a228a3a 3078 int i, rc = 0, agg_rings = 0;
c0c050c5 3079
b6ab4b01
MC
3080 if (!bp->rx_ring)
3081 return -ENOMEM;
3082
c0c050c5
MC
3083 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3084 agg_rings = 1;
3085
c0c050c5 3086 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 3087 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
3088 struct bnxt_ring_struct *ring;
3089
c0c050c5
MC
3090 ring = &rxr->rx_ring_struct;
3091
322b87ca
AG
3092 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3093 if (rc)
3094 return rc;
3095
b02e5a0e 3096 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
12479f62 3097 if (rc < 0)
96a8604f
JDB
3098 return rc;
3099
f18c2b77 3100 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
322b87ca
AG
3101 MEM_TYPE_PAGE_POOL,
3102 rxr->page_pool);
f18c2b77
AG
3103 if (rc) {
3104 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3105 return rc;
3106 }
3107
6fe19886 3108 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3109 if (rc)
3110 return rc;
3111
2c61d211 3112 ring->grp_idx = i;
c0c050c5
MC
3113 if (agg_rings) {
3114 u16 mem_size;
3115
3116 ring = &rxr->rx_agg_ring_struct;
6fe19886 3117 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3118 if (rc)
3119 return rc;
3120
9899bb59 3121 ring->grp_idx = i;
c0c050c5
MC
3122 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3123 mem_size = rxr->rx_agg_bmap_size / 8;
3124 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3125 if (!rxr->rx_agg_bmap)
3126 return -ENOMEM;
c0c050c5
MC
3127 }
3128 }
4a228a3a
MC
3129 if (bp->flags & BNXT_FLAG_TPA)
3130 rc = bnxt_alloc_tpa_info(bp);
3131 return rc;
c0c050c5
MC
3132}
3133
3134static void bnxt_free_tx_rings(struct bnxt *bp)
3135{
3136 int i;
3137 struct pci_dev *pdev = bp->pdev;
3138
b6ab4b01 3139 if (!bp->tx_ring)
c0c050c5
MC
3140 return;
3141
3142 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3143 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3144 struct bnxt_ring_struct *ring;
3145
c0c050c5
MC
3146 if (txr->tx_push) {
3147 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3148 txr->tx_push, txr->tx_push_mapping);
3149 txr->tx_push = NULL;
3150 }
3151
3152 ring = &txr->tx_ring_struct;
3153
6fe19886 3154 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
3155 }
3156}
3157
3158static int bnxt_alloc_tx_rings(struct bnxt *bp)
3159{
3160 int i, j, rc;
3161 struct pci_dev *pdev = bp->pdev;
3162
3163 bp->tx_push_size = 0;
3164 if (bp->tx_push_thresh) {
3165 int push_size;
3166
3167 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3168 bp->tx_push_thresh);
3169
4419dbe6 3170 if (push_size > 256) {
c0c050c5
MC
3171 push_size = 0;
3172 bp->tx_push_thresh = 0;
3173 }
3174
3175 bp->tx_push_size = push_size;
3176 }
3177
3178 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3179 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5 3180 struct bnxt_ring_struct *ring;
2e8ef77e 3181 u8 qidx;
c0c050c5 3182
c0c050c5
MC
3183 ring = &txr->tx_ring_struct;
3184
6fe19886 3185 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3186 if (rc)
3187 return rc;
3188
9899bb59 3189 ring->grp_idx = txr->bnapi->index;
c0c050c5 3190 if (bp->tx_push_size) {
c0c050c5
MC
3191 dma_addr_t mapping;
3192
 3193			/* One pre-allocated DMA buffer to back up the
 3194			 * TX push operation
3195 */
3196 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3197 bp->tx_push_size,
3198 &txr->tx_push_mapping,
3199 GFP_KERNEL);
3200
3201 if (!txr->tx_push)
3202 return -ENOMEM;
3203
c0c050c5
MC
3204 mapping = txr->tx_push_mapping +
3205 sizeof(struct tx_push_bd);
4419dbe6 3206 txr->data_mapping = cpu_to_le64(mapping);
c0c050c5 3207 }
2e8ef77e
MC
3208 qidx = bp->tc_to_qidx[j];
3209 ring->queue_id = bp->q_info[qidx].queue_id;
5f449249
MC
3210 if (i < bp->tx_nr_rings_xdp)
3211 continue;
c0c050c5
MC
3212 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3213 j++;
3214 }
3215 return 0;
3216}
3217
03c74487
MC
3218static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3219{
3220 kfree(cpr->cp_desc_ring);
3221 cpr->cp_desc_ring = NULL;
3222 kfree(cpr->cp_desc_mapping);
3223 cpr->cp_desc_mapping = NULL;
3224}
3225
3226static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3227{
3228 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3229 if (!cpr->cp_desc_ring)
3230 return -ENOMEM;
3231 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3232 GFP_KERNEL);
3233 if (!cpr->cp_desc_mapping)
3234 return -ENOMEM;
3235 return 0;
3236}
3237
3238static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3239{
3240 int i;
3241
3242 if (!bp->bnapi)
3243 return;
3244 for (i = 0; i < bp->cp_nr_rings; i++) {
3245 struct bnxt_napi *bnapi = bp->bnapi[i];
3246
3247 if (!bnapi)
3248 continue;
3249 bnxt_free_cp_arrays(&bnapi->cp_ring);
3250 }
3251}
3252
3253static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3254{
3255 int i, n = bp->cp_nr_pages;
3256
3257 for (i = 0; i < bp->cp_nr_rings; i++) {
3258 struct bnxt_napi *bnapi = bp->bnapi[i];
3259 int rc;
3260
3261 if (!bnapi)
3262 continue;
3263 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3264 if (rc)
3265 return rc;
3266 }
3267 return 0;
3268}
3269
c0c050c5
MC
3270static void bnxt_free_cp_rings(struct bnxt *bp)
3271{
3272 int i;
3273
3274 if (!bp->bnapi)
3275 return;
3276
3277 for (i = 0; i < bp->cp_nr_rings; i++) {
3278 struct bnxt_napi *bnapi = bp->bnapi[i];
3279 struct bnxt_cp_ring_info *cpr;
3280 struct bnxt_ring_struct *ring;
50e3ab78 3281 int j;
c0c050c5
MC
3282
3283 if (!bnapi)
3284 continue;
3285
3286 cpr = &bnapi->cp_ring;
3287 ring = &cpr->cp_ring_struct;
3288
6fe19886 3289 bnxt_free_ring(bp, &ring->ring_mem);
50e3ab78
MC
3290
3291 for (j = 0; j < 2; j++) {
3292 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3293
3294 if (cpr2) {
3295 ring = &cpr2->cp_ring_struct;
3296 bnxt_free_ring(bp, &ring->ring_mem);
03c74487 3297 bnxt_free_cp_arrays(cpr2);
50e3ab78
MC
3298 kfree(cpr2);
3299 cpr->cp_ring_arr[j] = NULL;
3300 }
3301 }
c0c050c5
MC
3302 }
3303}
3304
50e3ab78
MC
3305static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3306{
3307 struct bnxt_ring_mem_info *rmem;
3308 struct bnxt_ring_struct *ring;
3309 struct bnxt_cp_ring_info *cpr;
3310 int rc;
3311
3312 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3313 if (!cpr)
3314 return NULL;
3315
03c74487
MC
3316 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3317 if (rc) {
3318 bnxt_free_cp_arrays(cpr);
3319 kfree(cpr);
3320 return NULL;
3321 }
50e3ab78
MC
3322 ring = &cpr->cp_ring_struct;
3323 rmem = &ring->ring_mem;
3324 rmem->nr_pages = bp->cp_nr_pages;
3325 rmem->page_size = HW_CMPD_RING_SIZE;
3326 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3327 rmem->dma_arr = cpr->cp_desc_mapping;
3328 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3329 rc = bnxt_alloc_ring(bp, rmem);
3330 if (rc) {
3331 bnxt_free_ring(bp, rmem);
03c74487 3332 bnxt_free_cp_arrays(cpr);
50e3ab78
MC
3333 kfree(cpr);
3334 cpr = NULL;
3335 }
3336 return cpr;
3337}
3338
c0c050c5
MC
3339static int bnxt_alloc_cp_rings(struct bnxt *bp)
3340{
50e3ab78 3341 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
e5811b8c 3342 int i, rc, ulp_base_vec, ulp_msix;
c0c050c5 3343
e5811b8c
MC
3344 ulp_msix = bnxt_get_ulp_msix_num(bp);
3345 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
c0c050c5
MC
3346 for (i = 0; i < bp->cp_nr_rings; i++) {
3347 struct bnxt_napi *bnapi = bp->bnapi[i];
3348 struct bnxt_cp_ring_info *cpr;
3349 struct bnxt_ring_struct *ring;
3350
3351 if (!bnapi)
3352 continue;
3353
3354 cpr = &bnapi->cp_ring;
50e3ab78 3355 cpr->bnapi = bnapi;
c0c050c5
MC
3356 ring = &cpr->cp_ring_struct;
3357
6fe19886 3358 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3359 if (rc)
3360 return rc;
e5811b8c
MC
3361
3362 if (ulp_msix && i >= ulp_base_vec)
3363 ring->map_idx = i + ulp_msix;
3364 else
3365 ring->map_idx = i;
50e3ab78
MC
3366
3367 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3368 continue;
3369
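		/* On P5 chips the per-NAPI ring above is a notification queue;
		 * allocate dedicated rx and/or tx completion sub-rings under it.
		 */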
3370 if (i < bp->rx_nr_rings) {
3371 struct bnxt_cp_ring_info *cpr2 =
3372 bnxt_alloc_cp_sub_ring(bp);
3373
3374 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3375 if (!cpr2)
3376 return -ENOMEM;
3377 cpr2->bnapi = bnapi;
3378 }
3379 if ((sh && i < bp->tx_nr_rings) ||
3380 (!sh && i >= bp->rx_nr_rings)) {
3381 struct bnxt_cp_ring_info *cpr2 =
3382 bnxt_alloc_cp_sub_ring(bp);
3383
3384 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3385 if (!cpr2)
3386 return -ENOMEM;
3387 cpr2->bnapi = bnapi;
3388 }
c0c050c5
MC
3389 }
3390 return 0;
3391}
3392
3393static void bnxt_init_ring_struct(struct bnxt *bp)
3394{
3395 int i;
3396
3397 for (i = 0; i < bp->cp_nr_rings; i++) {
3398 struct bnxt_napi *bnapi = bp->bnapi[i];
6fe19886 3399 struct bnxt_ring_mem_info *rmem;
c0c050c5
MC
3400 struct bnxt_cp_ring_info *cpr;
3401 struct bnxt_rx_ring_info *rxr;
3402 struct bnxt_tx_ring_info *txr;
3403 struct bnxt_ring_struct *ring;
3404
3405 if (!bnapi)
3406 continue;
3407
3408 cpr = &bnapi->cp_ring;
3409 ring = &cpr->cp_ring_struct;
6fe19886
MC
3410 rmem = &ring->ring_mem;
3411 rmem->nr_pages = bp->cp_nr_pages;
3412 rmem->page_size = HW_CMPD_RING_SIZE;
3413 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3414 rmem->dma_arr = cpr->cp_desc_mapping;
3415 rmem->vmem_size = 0;
c0c050c5 3416
b6ab4b01 3417 rxr = bnapi->rx_ring;
3b2b7d9d
MC
3418 if (!rxr)
3419 goto skip_rx;
3420
c0c050c5 3421 ring = &rxr->rx_ring_struct;
6fe19886
MC
3422 rmem = &ring->ring_mem;
3423 rmem->nr_pages = bp->rx_nr_pages;
3424 rmem->page_size = HW_RXBD_RING_SIZE;
3425 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3426 rmem->dma_arr = rxr->rx_desc_mapping;
3427 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3428 rmem->vmem = (void **)&rxr->rx_buf_ring;
c0c050c5
MC
3429
3430 ring = &rxr->rx_agg_ring_struct;
6fe19886
MC
3431 rmem = &ring->ring_mem;
3432 rmem->nr_pages = bp->rx_agg_nr_pages;
3433 rmem->page_size = HW_RXBD_RING_SIZE;
3434 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3435 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3436 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3437 rmem->vmem = (void **)&rxr->rx_agg_ring;
c0c050c5 3438
3b2b7d9d 3439skip_rx:
b6ab4b01 3440 txr = bnapi->tx_ring;
3b2b7d9d
MC
3441 if (!txr)
3442 continue;
3443
c0c050c5 3444 ring = &txr->tx_ring_struct;
6fe19886
MC
3445 rmem = &ring->ring_mem;
3446 rmem->nr_pages = bp->tx_nr_pages;
3447 rmem->page_size = HW_RXBD_RING_SIZE;
3448 rmem->pg_arr = (void **)txr->tx_desc_ring;
3449 rmem->dma_arr = txr->tx_desc_mapping;
3450 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3451 rmem->vmem = (void **)&txr->tx_buf_ring;
c0c050c5
MC
3452 }
3453}
3454
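/* Descriptive note (editor): write the buffer type/flags into every RX BD of
 * the ring and store the running producer index in each descriptor's opaque
 * field.
 */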
3455static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3456{
3457 int i;
3458 u32 prod;
3459 struct rx_bd **rx_buf_ring;
3460
6fe19886
MC
3461 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3462 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
c0c050c5
MC
3463 int j;
3464 struct rx_bd *rxbd;
3465
3466 rxbd = rx_buf_ring[i];
3467 if (!rxbd)
3468 continue;
3469
3470 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3471 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3472 rxbd->rx_bd_opaque = prod;
3473 }
3474 }
3475}
3476
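/* Descriptive note (editor): fill one RX ring with data buffers, fill its
 * aggregation ring with pages when aggregation is enabled, and allocate the
 * TPA buffers.  A partially filled ring only triggers a warning; a TPA
 * buffer allocation failure returns -ENOMEM.
 */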
7737d325 3477static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
c0c050c5 3478{
7737d325 3479 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
c0c050c5 3480 struct net_device *dev = bp->dev;
7737d325 3481 u32 prod;
c0c050c5
MC
3482 int i;
3483
c0c050c5
MC
3484 prod = rxr->rx_prod;
3485 for (i = 0; i < bp->rx_ring_size; i++) {
7737d325 3486 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
c0c050c5
MC
3487 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3488 ring_nr, i, bp->rx_ring_size);
3489 break;
3490 }
3491 prod = NEXT_RX(prod);
3492 }
3493 rxr->rx_prod = prod;
edd0c2cc 3494
c0c050c5
MC
3495 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3496 return 0;
3497
c0c050c5
MC
3498 prod = rxr->rx_agg_prod;
3499 for (i = 0; i < bp->rx_agg_ring_size; i++) {
7737d325 3500 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
c0c050c5
MC
3501 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3502 ring_nr, i, bp->rx_agg_ring_size);
3503 break;
3504 }
3505 prod = NEXT_RX_AGG(prod);
3506 }
3507 rxr->rx_agg_prod = prod;
c0c050c5 3508
7737d325
MC
3509 if (rxr->rx_tpa) {
3510 dma_addr_t mapping;
3511 u8 *data;
c0c050c5 3512
7737d325
MC
3513 for (i = 0; i < bp->max_tpa; i++) {
3514 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3515 if (!data)
3516 return -ENOMEM;
c0c050c5 3517
7737d325
MC
3518 rxr->rx_tpa[i].data = data;
3519 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3520 rxr->rx_tpa[i].mapping = mapping;
c0c050c5
MC
3521 }
3522 }
c0c050c5
MC
3523 return 0;
3524}
3525
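/* Descriptive note (editor): initialize the RX and aggregation BD pages of
 * one ring with the appropriate buffer types, take a reference on any
 * attached XDP program, and then fill the ring via bnxt_alloc_one_rx_ring().
 */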
7737d325
MC
3526static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3527{
3528 struct bnxt_rx_ring_info *rxr;
3529 struct bnxt_ring_struct *ring;
3530 u32 type;
3531
3532 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3533 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3534
3535 if (NET_IP_ALIGN == 2)
3536 type |= RX_BD_FLAGS_SOP;
3537
3538 rxr = &bp->rx_ring[ring_nr];
3539 ring = &rxr->rx_ring_struct;
3540 bnxt_init_rxbd_pages(ring, type);
3541
3542 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3543 bpf_prog_add(bp->xdp_prog, 1);
3544 rxr->xdp_prog = bp->xdp_prog;
3545 }
3546 ring->fw_ring_id = INVALID_HW_RING_ID;
3547
3548 ring = &rxr->rx_agg_ring_struct;
3549 ring->fw_ring_id = INVALID_HW_RING_ID;
3550
3551 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3552 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3553 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3554
3555 bnxt_init_rxbd_pages(ring, type);
3556 }
3557
3558 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3559}
3560
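/* Descriptive note (editor): reset the firmware ring IDs of all completion
 * rings (including the P5 RX/TX sub-rings) and seed their interrupt
 * coalescing parameters from the global rx_coal settings.
 */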
2247925f
SP
3561static void bnxt_init_cp_rings(struct bnxt *bp)
3562{
3e08b184 3563 int i, j;
2247925f
SP
3564
3565 for (i = 0; i < bp->cp_nr_rings; i++) {
3566 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3567 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3568
3569 ring->fw_ring_id = INVALID_HW_RING_ID;
6a8788f2
AG
3570 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3571 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3e08b184
MC
3572 for (j = 0; j < 2; j++) {
3573 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3574
3575 if (!cpr2)
3576 continue;
3577
3578 ring = &cpr2->cp_ring_struct;
3579 ring->fw_ring_id = INVALID_HW_RING_ID;
3580 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3581 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3582 }
2247925f
SP
3583 }
3584}
3585
c0c050c5
MC
3586static int bnxt_init_rx_rings(struct bnxt *bp)
3587{
3588 int i, rc = 0;
3589
c61fb99c 3590 if (BNXT_RX_PAGE_MODE(bp)) {
c6d30e83
MC
3591 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3592 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
c61fb99c
MC
3593 } else {
3594 bp->rx_offset = BNXT_RX_OFFSET;
3595 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3596 }
b3dba77c 3597
c0c050c5
MC
3598 for (i = 0; i < bp->rx_nr_rings; i++) {
3599 rc = bnxt_init_one_rx_ring(bp, i);
3600 if (rc)
3601 break;
3602 }
3603
3604 return rc;
3605}
3606
3607static int bnxt_init_tx_rings(struct bnxt *bp)
3608{
3609 u16 i;
3610
3611 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3612 MAX_SKB_FRAGS + 1);
3613
3614 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3615 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3616 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3617
3618 ring->fw_ring_id = INVALID_HW_RING_ID;
3619 }
3620
3621 return 0;
3622}
3623
3624static void bnxt_free_ring_grps(struct bnxt *bp)
3625{
3626 kfree(bp->grp_info);
3627 bp->grp_info = NULL;
3628}
3629
3630static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3631{
3632 int i;
3633
3634 if (irq_re_init) {
3635 bp->grp_info = kcalloc(bp->cp_nr_rings,
3636 sizeof(struct bnxt_ring_grp_info),
3637 GFP_KERNEL);
3638 if (!bp->grp_info)
3639 return -ENOMEM;
3640 }
3641 for (i = 0; i < bp->cp_nr_rings; i++) {
3642 if (irq_re_init)
3643 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3644 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3645 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3646 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3647 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3648 }
3649 return 0;
3650}
3651
3652static void bnxt_free_vnics(struct bnxt *bp)
3653{
3654 kfree(bp->vnic_info);
3655 bp->vnic_info = NULL;
3656 bp->nr_vnics = 0;
3657}
3658
3659static int bnxt_alloc_vnics(struct bnxt *bp)
3660{
3661 int num_vnics = 1;
3662
3663#ifdef CONFIG_RFS_ACCEL
9b3d15e6 3664 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
c0c050c5
MC
3665 num_vnics += bp->rx_nr_rings;
3666#endif
3667
dc52c6c7
PS
3668 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3669 num_vnics++;
3670
c0c050c5
MC
3671 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3672 GFP_KERNEL);
3673 if (!bp->vnic_info)
3674 return -ENOMEM;
3675
3676 bp->nr_vnics = num_vnics;
3677 return 0;
3678}
3679
3680static void bnxt_init_vnics(struct bnxt *bp)
3681{
3682 int i;
3683
3684 for (i = 0; i < bp->nr_vnics; i++) {
3685 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
44c6f72a 3686 int j;
c0c050c5
MC
3687
3688 vnic->fw_vnic_id = INVALID_HW_RING_ID;
44c6f72a
MC
3689 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3690 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3691
c0c050c5
MC
3692 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3693
3694 if (bp->vnic_info[i].rss_hash_key) {
3695 if (i == 0)
3696 prandom_bytes(vnic->rss_hash_key,
3697 HW_HASH_KEY_SIZE);
3698 else
3699 memcpy(vnic->rss_hash_key,
3700 bp->vnic_info[0].rss_hash_key,
3701 HW_HASH_KEY_SIZE);
3702 }
3703 }
3704}
3705
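/* Descriptive note (editor): return the number of descriptor pages needed for
 * @ring_size entries: ring_size / desc_per_pg plus one, rounded up to the
 * next power of two, with a minimum of one page.
 */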
3706static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3707{
3708 int pages;
3709
3710 pages = ring_size / desc_per_pg;
3711
3712 if (!pages)
3713 return 1;
3714
3715 pages++;
3716
3717 while (pages & (pages - 1))
3718 pages++;
3719
3720 return pages;
3721}
3722
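/* Descriptive note (editor): derive the TPA mode from the netdev features:
 * LRO when NETIF_F_LRO is set, hardware GRO when NETIF_F_GRO_HW is set, and
 * no TPA at all when aggregation rings are disabled.
 */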
c6d30e83 3723void bnxt_set_tpa_flags(struct bnxt *bp)
c0c050c5
MC
3724{
3725 bp->flags &= ~BNXT_FLAG_TPA;
341138c3
MC
3726 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3727 return;
c0c050c5
MC
3728 if (bp->dev->features & NETIF_F_LRO)
3729 bp->flags |= BNXT_FLAG_LRO;
1054aee8 3730 else if (bp->dev->features & NETIF_F_GRO_HW)
c0c050c5
MC
3731 bp->flags |= BNXT_FLAG_GRO;
3732}
3733
3734/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3735 * be set on entry.
3736 */
3737void bnxt_set_ring_params(struct bnxt *bp)
3738{
27640ce6 3739 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
c0c050c5
MC
3740 u32 agg_factor = 0, agg_ring_size = 0;
3741
3742 /* 8 for CRC and VLAN */
3743 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3744
3745 rx_space = rx_size + NET_SKB_PAD +
3746 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3747
3748 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3749 ring_size = bp->rx_ring_size;
3750 bp->rx_agg_ring_size = 0;
3751 bp->rx_agg_nr_pages = 0;
3752
3753 if (bp->flags & BNXT_FLAG_TPA)
2839f28b 3754 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
c0c050c5
MC
3755
3756 bp->flags &= ~BNXT_FLAG_JUMBO;
bdbd1eb5 3757 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
c0c050c5
MC
3758 u32 jumbo_factor;
3759
3760 bp->flags |= BNXT_FLAG_JUMBO;
3761 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3762 if (jumbo_factor > agg_factor)
3763 agg_factor = jumbo_factor;
3764 }
c1129b51
MC
3765 if (agg_factor) {
3766 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3767 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3768 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3769 bp->rx_ring_size, ring_size);
3770 bp->rx_ring_size = ring_size;
3771 }
3772 agg_ring_size = ring_size * agg_factor;
c0c050c5 3773
c0c050c5
MC
3774 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3775 RX_DESC_CNT);
3776 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3777 u32 tmp = agg_ring_size;
3778
3779 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3780 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3781 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3782 tmp, agg_ring_size);
3783 }
3784 bp->rx_agg_ring_size = agg_ring_size;
3785 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3786 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3787 rx_space = rx_size + NET_SKB_PAD +
3788 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3789 }
3790
3791 bp->rx_buf_use_size = rx_size;
3792 bp->rx_buf_size = rx_space;
3793
3794 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3795 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3796
3797 ring_size = bp->tx_ring_size;
3798 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3799 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3800
27640ce6
MC
3801 max_rx_cmpl = bp->rx_ring_size;
3802 /* MAX TPA needs to be added because TPA_START completions are
3803 * immediately recycled, so the TPA completions are not bound by
3804 * the RX ring size.
3805 */
3806 if (bp->flags & BNXT_FLAG_TPA)
3807 max_rx_cmpl += bp->max_tpa;
3808 /* RX and TPA completions are 32-byte, all others are 16-byte */
3809 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
c0c050c5
MC
3810 bp->cp_ring_size = ring_size;
3811
3812 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3813 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3814 bp->cp_nr_pages = MAX_CP_PAGES;
3815 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3816 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3817 ring_size, bp->cp_ring_size);
3818 }
3819 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3820 bp->cp_ring_mask = bp->cp_bit - 1;
3821}
3822
96a8604f
JDB
3823/* Changing allocation mode of RX rings.
3824 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3825 */
c61fb99c 3826int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
6bb19474 3827{
c61fb99c
MC
3828 if (page_mode) {
3829 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3830 return -EOPNOTSUPP;
7eb9bb3a
MC
3831 bp->dev->max_mtu =
3832 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
c61fb99c
MC
3833 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3834 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
c61fb99c
MC
3835 bp->rx_dir = DMA_BIDIRECTIONAL;
3836 bp->rx_skb_func = bnxt_rx_page_skb;
1054aee8
MC
3837 /* Disable LRO or GRO_HW */
3838 netdev_update_features(bp->dev);
c61fb99c 3839 } else {
7eb9bb3a 3840 bp->dev->max_mtu = bp->max_mtu;
c61fb99c
MC
3841 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3842 bp->rx_dir = DMA_FROM_DEVICE;
3843 bp->rx_skb_func = bnxt_rx_skb;
3844 }
6bb19474
MC
3845 return 0;
3846}
3847
c0c050c5
MC
3848static void bnxt_free_vnic_attributes(struct bnxt *bp)
3849{
3850 int i;
3851 struct bnxt_vnic_info *vnic;
3852 struct pci_dev *pdev = bp->pdev;
3853
3854 if (!bp->vnic_info)
3855 return;
3856
3857 for (i = 0; i < bp->nr_vnics; i++) {
3858 vnic = &bp->vnic_info[i];
3859
3860 kfree(vnic->fw_grp_ids);
3861 vnic->fw_grp_ids = NULL;
3862
3863 kfree(vnic->uc_list);
3864 vnic->uc_list = NULL;
3865
3866 if (vnic->mc_list) {
3867 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3868 vnic->mc_list, vnic->mc_list_mapping);
3869 vnic->mc_list = NULL;
3870 }
3871
3872 if (vnic->rss_table) {
34370d24 3873 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
c0c050c5
MC
3874 vnic->rss_table,
3875 vnic->rss_table_dma_addr);
3876 vnic->rss_table = NULL;
3877 }
3878
3879 vnic->rss_hash_key = NULL;
3880 vnic->flags = 0;
3881 }
3882}
3883
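/* Descriptive note (editor): for every VNIC, allocate the unicast address
 * list, the DMA-coherent multicast list, the ring group ID array (non-P5
 * only) and the RSS table plus hash key, as required by the VNIC's flags.
 */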
3884static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3885{
3886 int i, rc = 0, size;
3887 struct bnxt_vnic_info *vnic;
3888 struct pci_dev *pdev = bp->pdev;
3889 int max_rings;
3890
3891 for (i = 0; i < bp->nr_vnics; i++) {
3892 vnic = &bp->vnic_info[i];
3893
3894 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3895 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3896
3897 if (mem_size > 0) {
3898 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3899 if (!vnic->uc_list) {
3900 rc = -ENOMEM;
3901 goto out;
3902 }
3903 }
3904 }
3905
3906 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3907 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3908 vnic->mc_list =
3909 dma_alloc_coherent(&pdev->dev,
3910 vnic->mc_list_size,
3911 &vnic->mc_list_mapping,
3912 GFP_KERNEL);
3913 if (!vnic->mc_list) {
3914 rc = -ENOMEM;
3915 goto out;
3916 }
3917 }
3918
44c6f72a
MC
3919 if (bp->flags & BNXT_FLAG_CHIP_P5)
3920 goto vnic_skip_grps;
3921
c0c050c5
MC
3922 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3923 max_rings = bp->rx_nr_rings;
3924 else
3925 max_rings = 1;
3926
3927 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3928 if (!vnic->fw_grp_ids) {
3929 rc = -ENOMEM;
3930 goto out;
3931 }
44c6f72a 3932vnic_skip_grps:
ae10ae74
MC
3933 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3934 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3935 continue;
3936
c0c050c5 3937 /* Allocate rss table and hash key */
34370d24
MC
3938 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3939 if (bp->flags & BNXT_FLAG_CHIP_P5)
3940 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3941
3942 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3943 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3944 vnic->rss_table_size,
c0c050c5
MC
3945 &vnic->rss_table_dma_addr,
3946 GFP_KERNEL);
3947 if (!vnic->rss_table) {
3948 rc = -ENOMEM;
3949 goto out;
3950 }
3951
c0c050c5
MC
3952 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3953 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3954 }
3955 return 0;
3956
3957out:
3958 return rc;
3959}
3960
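/* Descriptive note (editor): tear down the HWRM DMA pool and mark every HWRM
 * request still on the pending list as cancelled so that its waiter can
 * bail out.
 */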
3961static void bnxt_free_hwrm_resources(struct bnxt *bp)
3962{
68f684e2
EP
3963 struct bnxt_hwrm_wait_token *token;
3964
f9ff5782
EP
3965 dma_pool_destroy(bp->hwrm_dma_pool);
3966 bp->hwrm_dma_pool = NULL;
68f684e2
EP
3967
3968 rcu_read_lock();
3969 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
3970 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
3971 rcu_read_unlock();
c0c050c5
MC
3972}
3973
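/* Descriptive note (editor): create the DMA pool used for HWRM
 * request/response buffers and the list that tracks HWRM commands currently
 * in flight.
 */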
3974static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3975{
b34695a8 3976 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
f9ff5782
EP
3977 BNXT_HWRM_DMA_SIZE,
3978 BNXT_HWRM_DMA_ALIGN, 0);
3979 if (!bp->hwrm_dma_pool)
e605db80
DK
3980 return -ENOMEM;
3981
68f684e2
EP
3982 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
3983
e605db80
DK
3984 return 0;
3985}
3986
177a6cde 3987static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
c0c050c5 3988{
a37120b2
MC
3989 kfree(stats->hw_masks);
3990 stats->hw_masks = NULL;
3991 kfree(stats->sw_stats);
3992 stats->sw_stats = NULL;
177a6cde
MC
3993 if (stats->hw_stats) {
3994 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3995 stats->hw_stats_map);
3996 stats->hw_stats = NULL;
3997 }
3998}
c0c050c5 3999
a37120b2
MC
4000static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4001 bool alloc_masks)
177a6cde
MC
4002{
4003 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4004 &stats->hw_stats_map, GFP_KERNEL);
4005 if (!stats->hw_stats)
4006 return -ENOMEM;
00db3cba 4007
a37120b2
MC
4008 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4009 if (!stats->sw_stats)
4010 goto stats_mem_err;
4011
4012 if (alloc_masks) {
4013 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4014 if (!stats->hw_masks)
4015 goto stats_mem_err;
4016 }
177a6cde 4017 return 0;
a37120b2
MC
4018
4019stats_mem_err:
4020 bnxt_free_stats_mem(bp, stats);
4021 return -ENOMEM;
177a6cde 4022}
00db3cba 4023
d752d053
MC
4024static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4025{
4026 int i;
4027
4028 for (i = 0; i < count; i++)
4029 mask_arr[i] = mask;
4030}
4031
4032static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4033{
4034 int i;
4035
4036 for (i = 0; i < count; i++)
4037 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4038}
4039
4040static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4041 struct bnxt_stats_mem *stats)
4042{
bbf33d1d
EP
4043 struct hwrm_func_qstats_ext_output *resp;
4044 struct hwrm_func_qstats_ext_input *req;
d752d053
MC
4045 __le64 *hw_masks;
4046 int rc;
4047
4048 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4049 !(bp->flags & BNXT_FLAG_CHIP_P5))
4050 return -EOPNOTSUPP;
4051
bbf33d1d 4052 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
d752d053 4053 if (rc)
bbf33d1d 4054 return rc;
d752d053 4055
bbf33d1d
EP
4056 req->fid = cpu_to_le16(0xffff);
4057 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
d752d053 4058
bbf33d1d
EP
4059 resp = hwrm_req_hold(bp, req);
4060 rc = hwrm_req_send(bp, req);
4061 if (!rc) {
4062 hw_masks = &resp->rx_ucast_pkts;
4063 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4064 }
4065 hwrm_req_drop(bp, req);
d752d053
MC
4066 return rc;
4067}
4068
531d1d26
MC
4069static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4070static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4071
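/* Descriptive note (editor): query the counter masks from firmware where
 * supported, or fall back to fixed-width defaults, so that hardware counter
 * wrap-arounds can be handled in the software copies of the statistics.
 */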
d752d053
MC
4072static void bnxt_init_stats(struct bnxt *bp)
4073{
4074 struct bnxt_napi *bnapi = bp->bnapi[0];
4075 struct bnxt_cp_ring_info *cpr;
4076 struct bnxt_stats_mem *stats;
531d1d26
MC
4077 __le64 *rx_stats, *tx_stats;
4078 int rc, rx_count, tx_count;
4079 u64 *rx_masks, *tx_masks;
d752d053 4080 u64 mask;
531d1d26 4081 u8 flags;
d752d053
MC
4082
4083 cpr = &bnapi->cp_ring;
4084 stats = &cpr->stats;
4085 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4086 if (rc) {
4087 if (bp->flags & BNXT_FLAG_CHIP_P5)
4088 mask = (1ULL << 48) - 1;
4089 else
4090 mask = -1ULL;
4091 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4092 }
531d1d26
MC
4093 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4094 stats = &bp->port_stats;
4095 rx_stats = stats->hw_stats;
4096 rx_masks = stats->hw_masks;
4097 rx_count = sizeof(struct rx_port_stats) / 8;
4098 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4099 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4100 tx_count = sizeof(struct tx_port_stats) / 8;
4101
4102 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4103 rc = bnxt_hwrm_port_qstats(bp, flags);
4104 if (rc) {
4105 mask = (1ULL << 40) - 1;
4106
4107 bnxt_fill_masks(rx_masks, mask, rx_count);
4108 bnxt_fill_masks(tx_masks, mask, tx_count);
4109 } else {
4110 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4111 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4112 bnxt_hwrm_port_qstats(bp, 0);
4113 }
4114 }
4115 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4116 stats = &bp->rx_port_stats_ext;
4117 rx_stats = stats->hw_stats;
4118 rx_masks = stats->hw_masks;
4119 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4120 stats = &bp->tx_port_stats_ext;
4121 tx_stats = stats->hw_stats;
4122 tx_masks = stats->hw_masks;
4123 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4124
c07fa08f 4125 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
531d1d26
MC
4126 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4127 if (rc) {
4128 mask = (1ULL << 40) - 1;
4129
4130 bnxt_fill_masks(rx_masks, mask, rx_count);
4131 if (tx_stats)
4132 bnxt_fill_masks(tx_masks, mask, tx_count);
4133 } else {
4134 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4135 if (tx_stats)
4136 bnxt_copy_hw_masks(tx_masks, tx_stats,
4137 tx_count);
4138 bnxt_hwrm_port_qstats_ext(bp, 0);
4139 }
4140 }
d752d053
MC
4141}
4142
177a6cde
MC
4143static void bnxt_free_port_stats(struct bnxt *bp)
4144{
4145 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4146 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
36e53349 4147
177a6cde
MC
4148 bnxt_free_stats_mem(bp, &bp->port_stats);
4149 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4150 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
fd3ab1c7
MC
4151}
4152
4153static void bnxt_free_ring_stats(struct bnxt *bp)
4154{
177a6cde 4155 int i;
3bdf56c4 4156
c0c050c5
MC
4157 if (!bp->bnapi)
4158 return;
4159
c0c050c5
MC
4160 for (i = 0; i < bp->cp_nr_rings; i++) {
4161 struct bnxt_napi *bnapi = bp->bnapi[i];
4162 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4163
177a6cde 4164 bnxt_free_stats_mem(bp, &cpr->stats);
c0c050c5
MC
4165 }
4166}
4167
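/* Descriptive note (editor): allocate the per-ring hardware statistics
 * blocks, and on the PF also the port statistics and (if the firmware
 * supports them) the extended RX/TX port statistics blocks.
 */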
4168static int bnxt_alloc_stats(struct bnxt *bp)
4169{
4170 u32 size, i;
177a6cde 4171 int rc;
c0c050c5 4172
4e748506 4173 size = bp->hw_ring_stats_size;
c0c050c5
MC
4174
4175 for (i = 0; i < bp->cp_nr_rings; i++) {
4176 struct bnxt_napi *bnapi = bp->bnapi[i];
4177 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4178
177a6cde 4179 cpr->stats.len = size;
a37120b2 4180 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
177a6cde
MC
4181 if (rc)
4182 return rc;
c0c050c5
MC
4183
4184 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4185 }
3bdf56c4 4186
a220eabc
VV
4187 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4188 return 0;
fd3ab1c7 4189
177a6cde 4190 if (bp->port_stats.hw_stats)
a220eabc 4191 goto alloc_ext_stats;
3bdf56c4 4192
177a6cde 4193 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
a37120b2 4194 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
177a6cde
MC
4195 if (rc)
4196 return rc;
3bdf56c4 4197
a220eabc 4198 bp->flags |= BNXT_FLAG_PORT_STATS;
00db3cba 4199
fd3ab1c7 4200alloc_ext_stats:
a220eabc
VV
4201 /* Display extended statistics only if FW supports it */
4202 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
6154532f 4203 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
00db3cba
VV
4204 return 0;
4205
177a6cde 4206 if (bp->rx_port_stats_ext.hw_stats)
a220eabc 4207 goto alloc_tx_ext_stats;
fd3ab1c7 4208
177a6cde 4209 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
a37120b2 4210 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
177a6cde
MC
4211 /* Extended stats are optional */
4212 if (rc)
a220eabc 4213 return 0;
00db3cba 4214
fd3ab1c7 4215alloc_tx_ext_stats:
177a6cde 4216 if (bp->tx_port_stats_ext.hw_stats)
dfe64de9 4217 return 0;
fd3ab1c7 4218
6154532f
VV
4219 if (bp->hwrm_spec_code >= 0x10902 ||
4220 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
177a6cde 4221 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
a37120b2 4222 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
177a6cde
MC
4223 /* Extended stats are optional */
4224 if (rc)
4225 return 0;
3bdf56c4 4226 }
a220eabc 4227 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
c0c050c5
MC
4228 return 0;
4229}
4230
4231static void bnxt_clear_ring_indices(struct bnxt *bp)
4232{
4233 int i;
4234
4235 if (!bp->bnapi)
4236 return;
4237
4238 for (i = 0; i < bp->cp_nr_rings; i++) {
4239 struct bnxt_napi *bnapi = bp->bnapi[i];
4240 struct bnxt_cp_ring_info *cpr;
4241 struct bnxt_rx_ring_info *rxr;
4242 struct bnxt_tx_ring_info *txr;
4243
4244 if (!bnapi)
4245 continue;
4246
4247 cpr = &bnapi->cp_ring;
4248 cpr->cp_raw_cons = 0;
4249
b6ab4b01 4250 txr = bnapi->tx_ring;
3b2b7d9d
MC
4251 if (txr) {
4252 txr->tx_prod = 0;
4253 txr->tx_cons = 0;
4254 }
c0c050c5 4255
b6ab4b01 4256 rxr = bnapi->rx_ring;
3b2b7d9d
MC
4257 if (rxr) {
4258 rxr->rx_prod = 0;
4259 rxr->rx_agg_prod = 0;
4260 rxr->rx_sw_agg_prod = 0;
376a5b86 4261 rxr->rx_next_cons = 0;
3b2b7d9d 4262 }
c0c050c5
MC
4263 }
4264}
4265
4266static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4267{
4268#ifdef CONFIG_RFS_ACCEL
4269 int i;
4270
4271 /* We are under rtnl_lock and all our NAPIs have been disabled. It is
4272 * safe to delete the hash table.
4273 */
4274 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4275 struct hlist_head *head;
4276 struct hlist_node *tmp;
4277 struct bnxt_ntuple_filter *fltr;
4278
4279 head = &bp->ntp_fltr_hash_tbl[i];
4280 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4281 hlist_del(&fltr->hash);
4282 kfree(fltr);
4283 }
4284 }
4285 if (irq_reinit) {
4286 kfree(bp->ntp_fltr_bmap);
4287 bp->ntp_fltr_bmap = NULL;
4288 }
4289 bp->ntp_fltr_count = 0;
4290#endif
4291}
4292
4293static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4294{
4295#ifdef CONFIG_RFS_ACCEL
4296 int i, rc = 0;
4297
4298 if (!(bp->flags & BNXT_FLAG_RFS))
4299 return 0;
4300
4301 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4302 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4303
4304 bp->ntp_fltr_count = 0;
ac45bd93
DC
4305 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4306 sizeof(long),
c0c050c5
MC
4307 GFP_KERNEL);
4308
4309 if (!bp->ntp_fltr_bmap)
4310 rc = -ENOMEM;
4311
4312 return rc;
4313#else
4314 return 0;
4315#endif
4316}
4317
4318static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4319{
4320 bnxt_free_vnic_attributes(bp);
4321 bnxt_free_tx_rings(bp);
4322 bnxt_free_rx_rings(bp);
4323 bnxt_free_cp_rings(bp);
03c74487 4324 bnxt_free_all_cp_arrays(bp);
c0c050c5
MC
4325 bnxt_free_ntp_fltrs(bp, irq_re_init);
4326 if (irq_re_init) {
fd3ab1c7 4327 bnxt_free_ring_stats(bp);
b0d28207 4328 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
eba93de6 4329 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
fea6b333 4330 bnxt_free_port_stats(bp);
c0c050c5
MC
4331 bnxt_free_ring_grps(bp);
4332 bnxt_free_vnics(bp);
a960dec9
MC
4333 kfree(bp->tx_ring_map);
4334 bp->tx_ring_map = NULL;
b6ab4b01
MC
4335 kfree(bp->tx_ring);
4336 bp->tx_ring = NULL;
4337 kfree(bp->rx_ring);
4338 bp->rx_ring = NULL;
c0c050c5
MC
4339 kfree(bp->bnapi);
4340 bp->bnapi = NULL;
4341 } else {
4342 bnxt_clear_ring_indices(bp);
4343 }
4344}
4345
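/* Descriptive note (editor): top-level memory allocation: the bnxt_napi
 * array, RX/TX ring info arrays, statistics, ntuple filter table and VNICs
 * (on IRQ re-init), followed by all ring memory and VNIC attributes.
 * Everything is released through bnxt_free_mem() on failure.
 */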
4346static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4347{
01657bcd 4348 int i, j, rc, size, arr_size;
c0c050c5
MC
4349 void *bnapi;
4350
4351 if (irq_re_init) {
4352 /* Allocate bnapi mem pointer array and mem block for
4353 * all queues
4354 */
4355 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4356 bp->cp_nr_rings);
4357 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4358 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4359 if (!bnapi)
4360 return -ENOMEM;
4361
4362 bp->bnapi = bnapi;
4363 bnapi += arr_size;
4364 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4365 bp->bnapi[i] = bnapi;
4366 bp->bnapi[i]->index = i;
4367 bp->bnapi[i]->bp = bp;
e38287b7
MC
4368 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4369 struct bnxt_cp_ring_info *cpr =
4370 &bp->bnapi[i]->cp_ring;
4371
4372 cpr->cp_ring_struct.ring_mem.flags =
4373 BNXT_RMEM_RING_PTE_FLAG;
4374 }
c0c050c5
MC
4375 }
4376
b6ab4b01
MC
4377 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4378 sizeof(struct bnxt_rx_ring_info),
4379 GFP_KERNEL);
4380 if (!bp->rx_ring)
4381 return -ENOMEM;
4382
4383 for (i = 0; i < bp->rx_nr_rings; i++) {
e38287b7
MC
4384 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4385
4386 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4387 rxr->rx_ring_struct.ring_mem.flags =
4388 BNXT_RMEM_RING_PTE_FLAG;
4389 rxr->rx_agg_ring_struct.ring_mem.flags =
4390 BNXT_RMEM_RING_PTE_FLAG;
4391 }
4392 rxr->bnapi = bp->bnapi[i];
b6ab4b01
MC
4393 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4394 }
4395
4396 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4397 sizeof(struct bnxt_tx_ring_info),
4398 GFP_KERNEL);
4399 if (!bp->tx_ring)
4400 return -ENOMEM;
4401
a960dec9
MC
4402 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4403 GFP_KERNEL);
4404
4405 if (!bp->tx_ring_map)
4406 return -ENOMEM;
4407
01657bcd
MC
4408 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4409 j = 0;
4410 else
4411 j = bp->rx_nr_rings;
4412
4413 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
e38287b7
MC
4414 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4415
4416 if (bp->flags & BNXT_FLAG_CHIP_P5)
4417 txr->tx_ring_struct.ring_mem.flags =
4418 BNXT_RMEM_RING_PTE_FLAG;
4419 txr->bnapi = bp->bnapi[j];
4420 bp->bnapi[j]->tx_ring = txr;
5f449249 4421 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
38413406 4422 if (i >= bp->tx_nr_rings_xdp) {
e38287b7 4423 txr->txq_index = i - bp->tx_nr_rings_xdp;
38413406
MC
4424 bp->bnapi[j]->tx_int = bnxt_tx_int;
4425 } else {
fa3e93e8 4426 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
38413406
MC
4427 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4428 }
b6ab4b01
MC
4429 }
4430
c0c050c5
MC
4431 rc = bnxt_alloc_stats(bp);
4432 if (rc)
4433 goto alloc_mem_err;
d752d053 4434 bnxt_init_stats(bp);
c0c050c5
MC
4435
4436 rc = bnxt_alloc_ntp_fltrs(bp);
4437 if (rc)
4438 goto alloc_mem_err;
4439
4440 rc = bnxt_alloc_vnics(bp);
4441 if (rc)
4442 goto alloc_mem_err;
4443 }
4444
03c74487
MC
4445 rc = bnxt_alloc_all_cp_arrays(bp);
4446 if (rc)
4447 goto alloc_mem_err;
4448
c0c050c5
MC
4449 bnxt_init_ring_struct(bp);
4450
4451 rc = bnxt_alloc_rx_rings(bp);
4452 if (rc)
4453 goto alloc_mem_err;
4454
4455 rc = bnxt_alloc_tx_rings(bp);
4456 if (rc)
4457 goto alloc_mem_err;
4458
4459 rc = bnxt_alloc_cp_rings(bp);
4460 if (rc)
4461 goto alloc_mem_err;
4462
4463 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4464 BNXT_VNIC_UCAST_FLAG;
4465 rc = bnxt_alloc_vnic_attributes(bp);
4466 if (rc)
4467 goto alloc_mem_err;
4468 return 0;
4469
4470alloc_mem_err:
4471 bnxt_free_mem(bp, true);
4472 return rc;
4473}
4474
9d8bc097
MC
4475static void bnxt_disable_int(struct bnxt *bp)
4476{
4477 int i;
4478
4479 if (!bp->bnapi)
4480 return;
4481
4482 for (i = 0; i < bp->cp_nr_rings; i++) {
4483 struct bnxt_napi *bnapi = bp->bnapi[i];
4484 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
daf1f1e7 4485 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9d8bc097 4486
daf1f1e7 4487 if (ring->fw_ring_id != INVALID_HW_RING_ID)
697197e5 4488 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4489 }
4490}
4491
e5811b8c
MC
4492static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4493{
4494 struct bnxt_napi *bnapi = bp->bnapi[n];
4495 struct bnxt_cp_ring_info *cpr;
4496
4497 cpr = &bnapi->cp_ring;
4498 return cpr->cp_ring_struct.map_idx;
4499}
4500
9d8bc097
MC
4501static void bnxt_disable_int_sync(struct bnxt *bp)
4502{
4503 int i;
4504
38290e37
MC
4505 if (!bp->irq_tbl)
4506 return;
4507
9d8bc097
MC
4508 atomic_inc(&bp->intr_sem);
4509
4510 bnxt_disable_int(bp);
e5811b8c
MC
4511 for (i = 0; i < bp->cp_nr_rings; i++) {
4512 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4513
4514 synchronize_irq(bp->irq_tbl[map_idx].vector);
4515 }
9d8bc097
MC
4516}
4517
4518static void bnxt_enable_int(struct bnxt *bp)
4519{
4520 int i;
4521
4522 atomic_set(&bp->intr_sem, 0);
4523 for (i = 0; i < bp->cp_nr_rings; i++) {
4524 struct bnxt_napi *bnapi = bp->bnapi[i];
4525 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4526
697197e5 4527 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4528 }
4529}
4530
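/* Descriptive note (editor): register the driver with the firmware:
 * advertise the OS type and driver version, the VF commands to be forwarded
 * to the PF, driver support for hot reset and error recovery, and the async
 * events the driver wants to receive.  The response indicates whether the
 * firmware supports FUNC_DRV_IF_CHANGE.
 */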
2e882468
VV
4531int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4532 bool async_only)
c0c050c5 4533{
25be8623
MC
4534 DECLARE_BITMAP(async_events_bmap, 256);
4535 u32 *events = (u32 *)async_events_bmap;
bbf33d1d
EP
4536 struct hwrm_func_drv_rgtr_output *resp;
4537 struct hwrm_func_drv_rgtr_input *req;
acfb50e4 4538 u32 flags;
2e882468 4539 int rc, i;
a1653b13 4540
bbf33d1d
EP
4541 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4542 if (rc)
4543 return rc;
a1653b13 4544
bbf33d1d
EP
4545 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4546 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4547 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
a1653b13 4548
bbf33d1d 4549 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
8280b38e
VV
4550 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4551 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4552 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
acfb50e4 4553 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
e633a329
VV
4554 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4555 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
bbf33d1d
EP
4556 req->flags = cpu_to_le32(flags);
4557 req->ver_maj_8b = DRV_VER_MAJ;
4558 req->ver_min_8b = DRV_VER_MIN;
4559 req->ver_upd_8b = DRV_VER_UPD;
4560 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4561 req->ver_min = cpu_to_le16(DRV_VER_MIN);
4562 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
c0c050c5
MC
4563
4564 if (BNXT_PF(bp)) {
9b0436c3 4565 u32 data[8];
a1653b13 4566 int i;
c0c050c5 4567
9b0436c3
MC
4568 memset(data, 0, sizeof(data));
4569 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4570 u16 cmd = bnxt_vf_req_snif[i];
4571 unsigned int bit, idx;
4572
4573 idx = cmd / 32;
4574 bit = cmd % 32;
4575 data[idx] |= 1 << bit;
4576 }
c0c050c5 4577
de68f5de 4578 for (i = 0; i < 8; i++)
bbf33d1d 4579 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
de68f5de 4580
bbf33d1d 4581 req->enables |=
c0c050c5
MC
4582 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4583 }
4584
abd43a13 4585 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
bbf33d1d 4586 req->flags |= cpu_to_le32(
abd43a13
VD
4587 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4588
2e882468
VV
4589 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4590 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4591 u16 event_id = bnxt_async_events_arr[i];
4592
4593 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4594 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4595 continue;
4596 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4597 }
4598 if (bmap && bmap_size) {
4599 for (i = 0; i < bmap_size; i++) {
4600 if (test_bit(i, bmap))
4601 __set_bit(i, async_events_bmap);
4602 }
4603 }
4604 for (i = 0; i < 8; i++)
bbf33d1d 4605 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
2e882468
VV
4606
4607 if (async_only)
bbf33d1d 4608 req->enables =
2e882468
VV
4609 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4610
bbf33d1d
EP
4611 resp = hwrm_req_hold(bp, req);
4612 rc = hwrm_req_send(bp, req);
bdb38602
VV
4613 if (!rc) {
4614 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4615 if (resp->flags &
4616 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4617 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4618 }
bbf33d1d 4619 hwrm_req_drop(bp, req);
25e1acd6 4620 return rc;
c0c050c5
MC
4621}
4622
be58a0da
JH
4623static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4624{
bbf33d1d
EP
4625 struct hwrm_func_drv_unrgtr_input *req;
4626 int rc;
be58a0da 4627
bdb38602
VV
4628 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4629 return 0;
4630
bbf33d1d
EP
4631 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4632 if (rc)
4633 return rc;
4634 return hwrm_req_send(bp, req);
be58a0da
JH
4635}
4636
c0c050c5
MC
4637static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4638{
bbf33d1d
EP
4639 struct hwrm_tunnel_dst_port_free_input *req;
4640 int rc;
c0c050c5 4641
bbf33d1d
EP
4642 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4643 if (rc)
4644 return rc;
4645
4646 req->tunnel_type = tunnel_type;
c0c050c5
MC
4647
4648 switch (tunnel_type) {
4649 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
bbf33d1d 4650 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
442a35a5 4651 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
c0c050c5
MC
4652 break;
4653 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
bbf33d1d 4654 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
442a35a5 4655 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
c0c050c5
MC
4656 break;
4657 default:
4658 break;
4659 }
4660
bbf33d1d 4661 rc = hwrm_req_send(bp, req);
c0c050c5
MC
4662 if (rc)
4663 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4664 rc);
4665 return rc;
4666}
4667
4668static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4669 u8 tunnel_type)
4670{
bbf33d1d
EP
4671 struct hwrm_tunnel_dst_port_alloc_output *resp;
4672 struct hwrm_tunnel_dst_port_alloc_input *req;
4673 int rc;
c0c050c5 4674
bbf33d1d
EP
4675 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4676 if (rc)
4677 return rc;
c0c050c5 4678
bbf33d1d
EP
4679 req->tunnel_type = tunnel_type;
4680 req->tunnel_dst_port_val = port;
c0c050c5 4681
bbf33d1d
EP
4682 resp = hwrm_req_hold(bp, req);
4683 rc = hwrm_req_send(bp, req);
c0c050c5
MC
4684 if (rc) {
4685 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4686 rc);
4687 goto err_out;
4688 }
4689
57aac71b
CJ
4690 switch (tunnel_type) {
4691 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
442a35a5
JK
4692 bp->vxlan_fw_dst_port_id =
4693 le16_to_cpu(resp->tunnel_dst_port_id);
57aac71b
CJ
4694 break;
4695 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
442a35a5 4696 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
57aac71b
CJ
4697 break;
4698 default:
4699 break;
4700 }
4701
c0c050c5 4702err_out:
bbf33d1d 4703 hwrm_req_drop(bp, req);
c0c050c5
MC
4704 return rc;
4705}
4706
4707static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4708{
bbf33d1d 4709 struct hwrm_cfa_l2_set_rx_mask_input *req;
c0c050c5 4710 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d 4711 int rc;
c0c050c5 4712
bbf33d1d
EP
4713 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4714 if (rc)
4715 return rc;
c0c050c5 4716
bbf33d1d
EP
4717 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4718 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4719 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4720 req->mask = cpu_to_le32(vnic->rx_mask);
4721 return hwrm_req_send_silent(bp, req);
c0c050c5
MC
4722}
4723
4724#ifdef CONFIG_RFS_ACCEL
4725static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4726 struct bnxt_ntuple_filter *fltr)
4727{
bbf33d1d
EP
4728 struct hwrm_cfa_ntuple_filter_free_input *req;
4729 int rc;
c0c050c5 4730
bbf33d1d
EP
4731 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4732 if (rc)
4733 return rc;
4734
4735 req->ntuple_filter_id = fltr->filter_id;
4736 return hwrm_req_send(bp, req);
c0c050c5
MC
4737}
4738
4739#define BNXT_NTP_FLTR_FLAGS \
4740 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4741 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4742 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4743 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4744 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4745 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4746 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4747 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4748 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4749 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4750 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4751 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4752 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
c193554e 4753 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
c0c050c5 4754
61aad724
MC
4755#define BNXT_NTP_TUNNEL_FLTR_FLAG \
4756 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4757
c0c050c5
MC
4758static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4759 struct bnxt_ntuple_filter *fltr)
4760{
5c209fc8 4761 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
bbf33d1d 4762 struct hwrm_cfa_ntuple_filter_alloc_input *req;
c0c050c5 4763 struct flow_keys *keys = &fltr->fkeys;
ac33906c 4764 struct bnxt_vnic_info *vnic;
41136ab3 4765 u32 flags = 0;
bbf33d1d 4766 int rc;
c0c050c5 4767
bbf33d1d
EP
4768 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4769 if (rc)
4770 return rc;
4771
4772 req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
c0c050c5 4773
41136ab3
MC
4774 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4775 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
bbf33d1d 4776 req->dst_id = cpu_to_le16(fltr->rxq);
ac33906c
MC
4777 } else {
4778 vnic = &bp->vnic_info[fltr->rxq + 1];
bbf33d1d 4779 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
ac33906c 4780 }
bbf33d1d
EP
4781 req->flags = cpu_to_le32(flags);
4782 req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
c0c050c5 4783
bbf33d1d
EP
4784 req->ethertype = htons(ETH_P_IP);
4785 memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4786 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4787 req->ip_protocol = keys->basic.ip_proto;
c0c050c5 4788
dda0e746
MC
4789 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4790 int i;
4791
bbf33d1d
EP
4792 req->ethertype = htons(ETH_P_IPV6);
4793 req->ip_addr_type =
dda0e746 4794 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
bbf33d1d 4795 *(struct in6_addr *)&req->src_ipaddr[0] =
dda0e746 4796 keys->addrs.v6addrs.src;
bbf33d1d 4797 *(struct in6_addr *)&req->dst_ipaddr[0] =
dda0e746
MC
4798 keys->addrs.v6addrs.dst;
4799 for (i = 0; i < 4; i++) {
bbf33d1d
EP
4800 req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4801 req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
dda0e746
MC
4802 }
4803 } else {
bbf33d1d
EP
4804 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
4805 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4806 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4807 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
dda0e746 4808 }
61aad724 4809 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
bbf33d1d
EP
4810 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4811 req->tunnel_type =
61aad724
MC
4812 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4813 }
c0c050c5 4814
bbf33d1d
EP
4815 req->src_port = keys->ports.src;
4816 req->src_port_mask = cpu_to_be16(0xffff);
4817 req->dst_port = keys->ports.dst;
4818 req->dst_port_mask = cpu_to_be16(0xffff);
c0c050c5 4819
bbf33d1d
EP
4820 resp = hwrm_req_hold(bp, req);
4821 rc = hwrm_req_send(bp, req);
4822 if (!rc)
c0c050c5 4823 fltr->filter_id = resp->ntuple_filter_id;
bbf33d1d 4824 hwrm_req_drop(bp, req);
c0c050c5
MC
4825 return rc;
4826}
4827#endif
4828
4829static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4830 u8 *mac_addr)
4831{
bbf33d1d
EP
4832 struct hwrm_cfa_l2_filter_alloc_output *resp;
4833 struct hwrm_cfa_l2_filter_alloc_input *req;
4834 int rc;
c0c050c5 4835
bbf33d1d
EP
4836 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
4837 if (rc)
4838 return rc;
4839
4840 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
dc52c6c7 4841 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
bbf33d1d 4842 req->flags |=
dc52c6c7 4843 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
bbf33d1d
EP
4844 req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4845 req->enables =
c0c050c5 4846 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
c193554e 4847 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
c0c050c5 4848 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
bbf33d1d
EP
4849 memcpy(req->l2_addr, mac_addr, ETH_ALEN);
4850 req->l2_addr_mask[0] = 0xff;
4851 req->l2_addr_mask[1] = 0xff;
4852 req->l2_addr_mask[2] = 0xff;
4853 req->l2_addr_mask[3] = 0xff;
4854 req->l2_addr_mask[4] = 0xff;
4855 req->l2_addr_mask[5] = 0xff;
4856
4857 resp = hwrm_req_hold(bp, req);
4858 rc = hwrm_req_send(bp, req);
c0c050c5
MC
4859 if (!rc)
4860 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4861 resp->l2_filter_id;
bbf33d1d 4862 hwrm_req_drop(bp, req);
c0c050c5
MC
4863 return rc;
4864}
4865
4866static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4867{
bbf33d1d 4868 struct hwrm_cfa_l2_filter_free_input *req;
c0c050c5 4869 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
bbf33d1d 4870 int rc;
c0c050c5
MC
4871
4872 /* Any associated ntuple filters will also be cleared by firmware. */
bbf33d1d
EP
4873 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
4874 if (rc)
4875 return rc;
4876 hwrm_req_hold(bp, req);
c0c050c5
MC
4877 for (i = 0; i < num_of_vnics; i++) {
4878 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4879
4880 for (j = 0; j < vnic->uc_filter_count; j++) {
bbf33d1d 4881 req->l2_filter_id = vnic->fw_l2_filter_id[j];
c0c050c5 4882
bbf33d1d 4883 rc = hwrm_req_send(bp, req);
c0c050c5
MC
4884 }
4885 vnic->uc_filter_count = 0;
4886 }
bbf33d1d 4887 hwrm_req_drop(bp, req);
c0c050c5
MC
4888 return rc;
4889}
4890
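/* Descriptive note (editor): configure hardware TPA (LRO/GRO_HW) for a VNIC.
 * The aggregation limits are derived from the MTU-based MSS estimate and, on
 * P5 chips, from the max_tpa value reported by firmware.
 */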
4891static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4892{
4893 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
79632e9b 4894 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
bbf33d1d
EP
4895 struct hwrm_vnic_tpa_cfg_input *req;
4896 int rc;
c0c050c5 4897
3c4fe80b
MC
4898 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4899 return 0;
4900
bbf33d1d
EP
4901 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
4902 if (rc)
4903 return rc;
c0c050c5
MC
4904
4905 if (tpa_flags) {
4906 u16 mss = bp->dev->mtu - 40;
4907 u32 nsegs, n, segs = 0, flags;
4908
4909 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4910 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4911 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4912 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4913 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4914 if (tpa_flags & BNXT_FLAG_GRO)
4915 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4916
bbf33d1d 4917 req->flags = cpu_to_le32(flags);
c0c050c5 4918
bbf33d1d 4919 req->enables =
c0c050c5 4920 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
c193554e
MC
4921 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4922 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
c0c050c5
MC
4923
4924 /* The number of segs is given in log2 units, and the first
4925 * packet is not included in that count.
4926 */
2839f28b
MC
4927 if (mss <= BNXT_RX_PAGE_SIZE) {
4928 n = BNXT_RX_PAGE_SIZE / mss;
c0c050c5
MC
4929 nsegs = (MAX_SKB_FRAGS - 1) * n;
4930 } else {
2839f28b
MC
4931 n = mss / BNXT_RX_PAGE_SIZE;
4932 if (mss & (BNXT_RX_PAGE_SIZE - 1))
c0c050c5
MC
4933 n++;
4934 nsegs = (MAX_SKB_FRAGS - n) / n;
4935 }
4936
79632e9b
MC
4937 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4938 segs = MAX_TPA_SEGS_P5;
4939 max_aggs = bp->max_tpa;
4940 } else {
4941 segs = ilog2(nsegs);
4942 }
bbf33d1d
EP
4943 req->max_agg_segs = cpu_to_le16(segs);
4944 req->max_aggs = cpu_to_le16(max_aggs);
c193554e 4945
bbf33d1d 4946 req->min_agg_len = cpu_to_le32(512);
c0c050c5 4947 }
bbf33d1d 4948 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
c0c050c5 4949
bbf33d1d 4950 return hwrm_req_send(bp, req);
c0c050c5
MC
4951}
4952
2c61d211
MC
4953static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4954{
4955 struct bnxt_ring_grp_info *grp_info;
4956
4957 grp_info = &bp->grp_info[ring->grp_idx];
4958 return grp_info->cp_fw_ring_id;
4959}
4960
4961static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4962{
4963 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4964 struct bnxt_napi *bnapi = rxr->bnapi;
4965 struct bnxt_cp_ring_info *cpr;
4966
4967 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4968 return cpr->cp_ring_struct.fw_ring_id;
4969 } else {
4970 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4971 }
4972}
4973
4974static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4975{
4976 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4977 struct bnxt_napi *bnapi = txr->bnapi;
4978 struct bnxt_cp_ring_info *cpr;
4979
4980 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4981 return cpr->cp_ring_struct.fw_ring_id;
4982 } else {
4983 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4984 }
4985}
4986
1667cbf6
MC
4987static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
4988{
4989 int entries;
4990
4991 if (bp->flags & BNXT_FLAG_CHIP_P5)
4992 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
4993 else
4994 entries = HW_HASH_INDEX_SIZE;
4995
4996 bp->rss_indir_tbl_entries = entries;
4997 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
4998 GFP_KERNEL);
4999 if (!bp->rss_indir_tbl)
5000 return -ENOMEM;
5001 return 0;
5002}
5003
5004static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5005{
5006 u16 max_rings, max_entries, pad, i;
5007
5008 if (!bp->rx_nr_rings)
5009 return;
5010
5011 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5012 max_rings = bp->rx_nr_rings - 1;
5013 else
5014 max_rings = bp->rx_nr_rings;
5015
5016 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5017
5018 for (i = 0; i < max_entries; i++)
5019 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5020
5021 pad = bp->rss_indir_tbl_entries - max_entries;
5022 if (pad)
5023 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5024}
5025
bd3191b5
MC
5026static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5027{
5028 u16 i, tbl_size, max_ring = 0;
5029
5030 if (!bp->rss_indir_tbl)
5031 return 0;
5032
5033 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5034 for (i = 0; i < tbl_size; i++)
5035 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5036 return max_ring;
5037}
5038
f9f6a3fb
MC
5039int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5040{
5041 if (bp->flags & BNXT_FLAG_CHIP_P5)
5042 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5043 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5044 return 2;
5045 return 1;
5046}
5047
f33a305d
MC
5048static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5049{
5050 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5051 u16 i, j;
5052
5053 /* Fill the RSS indirection table with ring group ids */
5054 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5055 if (!no_rss)
5056 j = bp->rss_indir_tbl[i];
5057 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5058 }
5059}
5060
5061static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5062 struct bnxt_vnic_info *vnic)
5063{
5064 __le16 *ring_tbl = vnic->rss_table;
5065 struct bnxt_rx_ring_info *rxr;
5066 u16 tbl_size, i;
5067
5068 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5069
5070 for (i = 0; i < tbl_size; i++) {
5071 u16 ring_id, j;
5072
5073 j = bp->rss_indir_tbl[i];
5074 rxr = &bp->rx_ring[j];
5075
5076 ring_id = rxr->rx_ring_struct.fw_ring_id;
5077 *ring_tbl++ = cpu_to_le16(ring_id);
5078 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5079 *ring_tbl++ = cpu_to_le16(ring_id);
5080 }
5081}
5082
5083static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5084{
5085 if (bp->flags & BNXT_FLAG_CHIP_P5)
5086 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5087 else
5088 __bnxt_fill_hw_rss_tbl(bp, vnic);
5089}
5090
c0c050c5
MC
5091static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5092{
c0c050c5 5093 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5094 struct hwrm_vnic_rss_cfg_input *req;
5095 int rc;
c0c050c5 5096
7b3af4f7
MC
5097 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5098 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
c0c050c5
MC
5099 return 0;
5100
bbf33d1d
EP
5101 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5102 if (rc)
5103 return rc;
5104
c0c050c5 5105 if (set_rss) {
f33a305d 5106 bnxt_fill_hw_rss_tbl(bp, vnic);
bbf33d1d
EP
5107 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5108 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5109 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5110 req->hash_key_tbl_addr =
c0c050c5
MC
5111 cpu_to_le64(vnic->rss_hash_key_dma_addr);
5112 }
bbf33d1d
EP
5113 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5114 return hwrm_req_send(bp, req);
c0c050c5
MC
5115}
5116
7b3af4f7
MC
5117static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5118{
5119 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d 5120 struct hwrm_vnic_rss_cfg_input *req;
f33a305d
MC
5121 dma_addr_t ring_tbl_map;
5122 u32 i, nr_ctxs;
bbf33d1d
EP
5123 int rc;
5124
5125 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5126 if (rc)
5127 return rc;
5128
5129 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5130 if (!set_rss)
5131 return hwrm_req_send(bp, req);
7b3af4f7 5132
f33a305d 5133 bnxt_fill_hw_rss_tbl(bp, vnic);
bbf33d1d
EP
5134 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5135 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5136 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
f33a305d 5137 ring_tbl_map = vnic->rss_table_dma_addr;
f9f6a3fb 5138 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
7b3af4f7 5139
bbf33d1d
EP
5140 hwrm_req_hold(bp, req);
5141 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5142 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5143 req->ring_table_pair_index = i;
5144 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5145 rc = hwrm_req_send(bp, req);
7b3af4f7 5146 if (rc)
bbf33d1d 5147 goto exit;
7b3af4f7 5148 }
bbf33d1d
EP
5149
5150exit:
5151 hwrm_req_drop(bp, req);
5152 return rc;
7b3af4f7
MC
5153}
5154
c0c050c5
MC
5155static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5156{
5157 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5158 struct hwrm_vnic_plcmodes_cfg_input *req;
5159 int rc;
5160
5161 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5162 if (rc)
5163 return rc;
c0c050c5 5164
bbf33d1d
EP
5165 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5166 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5167 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5168 req->enables =
c0c050c5
MC
5169 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5170 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5171 /* thresholds not implemented in firmware yet */
bbf33d1d
EP
5172 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5173 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5174 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5175 return hwrm_req_send(bp, req);
c0c050c5
MC
5176}
5177
94ce9caa
PS
5178static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5179 u16 ctx_idx)
c0c050c5 5180{
bbf33d1d 5181 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
c0c050c5 5182
bbf33d1d
EP
5183 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5184 return;
5185
5186 req->rss_cos_lb_ctx_id =
94ce9caa 5187 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
c0c050c5 5188
bbf33d1d 5189 hwrm_req_send(bp, req);
94ce9caa 5190 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
c0c050c5
MC
5191}
5192
5193static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5194{
94ce9caa 5195 int i, j;
c0c050c5
MC
5196
5197 for (i = 0; i < bp->nr_vnics; i++) {
5198 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5199
94ce9caa
PS
5200 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5201 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5202 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5203 }
c0c050c5
MC
5204 }
5205 bp->rsscos_nr_ctxs = 0;
5206}
5207
94ce9caa 5208static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
c0c050c5 5209{
bbf33d1d
EP
5210 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5211 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
c0c050c5 5212 int rc;
c0c050c5 5213
bbf33d1d
EP
5214 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5215 if (rc)
5216 return rc;
c0c050c5 5217
bbf33d1d
EP
5218 resp = hwrm_req_hold(bp, req);
5219 rc = hwrm_req_send(bp, req);
c0c050c5 5220 if (!rc)
94ce9caa 5221 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
c0c050c5 5222 le16_to_cpu(resp->rss_cos_lb_ctx_id);
bbf33d1d 5223 hwrm_req_drop(bp, req);
c0c050c5
MC
5224
5225 return rc;
5226}
5227
abe93ad2
MC
5228static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5229{
5230 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5231 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5232 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5233}
5234
a588e458 5235int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
c0c050c5 5236{
c0c050c5 5237 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5238 struct hwrm_vnic_cfg_input *req;
5239 unsigned int ring = 0, grp_idx;
cf6645f8 5240 u16 def_vlan = 0;
bbf33d1d 5241 int rc;
c0c050c5 5242
bbf33d1d
EP
5243 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5244 if (rc)
5245 return rc;
dc52c6c7 5246
7b3af4f7
MC
5247 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5248 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5249
bbf33d1d 5250 req->default_rx_ring_id =
7b3af4f7 5251 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
bbf33d1d 5252 req->default_cmpl_ring_id =
7b3af4f7 5253 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
bbf33d1d 5254 req->enables =
7b3af4f7
MC
5255 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5256 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5257 goto vnic_mru;
5258 }
bbf33d1d 5259 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
c0c050c5 5260 /* Only RSS is supported for now. TBD: COS & LB */
dc52c6c7 5261 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
bbf33d1d
EP
5262 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5263 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
dc52c6c7 5264 VNIC_CFG_REQ_ENABLES_MRU);
ae10ae74 5265 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
bbf33d1d 5266 req->rss_rule =
ae10ae74 5267 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
bbf33d1d 5268 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
ae10ae74 5269 VNIC_CFG_REQ_ENABLES_MRU);
bbf33d1d 5270 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
dc52c6c7 5271 } else {
bbf33d1d 5272 req->rss_rule = cpu_to_le16(0xffff);
dc52c6c7 5273 }
94ce9caa 5274
dc52c6c7
PS
5275 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5276 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
bbf33d1d
EP
5277 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5278 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
94ce9caa 5279 } else {
bbf33d1d 5280 req->cos_rule = cpu_to_le16(0xffff);
94ce9caa
PS
5281 }
5282
c0c050c5 5283 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
b81a90d3 5284 ring = 0;
c0c050c5 5285 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
b81a90d3 5286 ring = vnic_id - 1;
76595193
PS
5287 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5288 ring = bp->rx_nr_rings - 1;
c0c050c5 5289
b81a90d3 5290 grp_idx = bp->rx_ring[ring].bnapi->index;
bbf33d1d
EP
5291 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5292 req->lb_rule = cpu_to_le16(0xffff);
7b3af4f7 5293vnic_mru:
bbf33d1d 5294 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
c0c050c5 5295
bbf33d1d 5296 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
cf6645f8
MC
5297#ifdef CONFIG_BNXT_SRIOV
5298 if (BNXT_VF(bp))
5299 def_vlan = bp->vf.vlan;
5300#endif
5301 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
bbf33d1d 5302 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
a588e458 5303 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
bbf33d1d 5304 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
c0c050c5 5305
bbf33d1d 5306 return hwrm_req_send(bp, req);
c0c050c5
MC
5307}
5308
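The MRU programmed above is simply the netdev MTU plus room for the Ethernet header and a single VLAN tag. A worked example, assuming the usual ETH_HLEN (14) and VLAN_HLEN (4) values (illustrative helper only, not part of the driver):

static inline u16 bnxt_example_vnic_mru(unsigned int mtu)
{
	/* mtu 1500 -> mru 1500 + 14 + 4 = 1518 bytes (one tagged max-size frame) */
	return mtu + ETH_HLEN + VLAN_HLEN;
}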
3d061591 5309static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
c0c050c5 5310{
c0c050c5 5311 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
bbf33d1d 5312 struct hwrm_vnic_free_input *req;
c0c050c5 5313
bbf33d1d
EP
5314 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5315 return;
5316
5317 req->vnic_id =
c0c050c5
MC
5318 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5319
bbf33d1d 5320 hwrm_req_send(bp, req);
c0c050c5
MC
5321 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5322 }
c0c050c5
MC
5323}
5324
5325static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5326{
5327 u16 i;
5328
5329 for (i = 0; i < bp->nr_vnics; i++)
5330 bnxt_hwrm_vnic_free_one(bp, i);
5331}
5332
b81a90d3
MC
5333static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5334 unsigned int start_rx_ring_idx,
5335 unsigned int nr_rings)
c0c050c5 5336{
b81a90d3 5337 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
44c6f72a 5338 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
bbf33d1d
EP
5339 struct hwrm_vnic_alloc_output *resp;
5340 struct hwrm_vnic_alloc_input *req;
5341 int rc;
5342
5343 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5344 if (rc)
5345 return rc;
44c6f72a
MC
5346
5347 if (bp->flags & BNXT_FLAG_CHIP_P5)
5348 goto vnic_no_ring_grps;
c0c050c5
MC
5349
5350 /* map ring groups to this vnic */
b81a90d3
MC
5351 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5352 grp_idx = bp->rx_ring[i].bnapi->index;
5353 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
c0c050c5 5354 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
b81a90d3 5355 j, nr_rings);
c0c050c5
MC
5356 break;
5357 }
44c6f72a 5358 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
c0c050c5
MC
5359 }
5360
44c6f72a
MC
5361vnic_no_ring_grps:
5362 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5363 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
c0c050c5 5364 if (vnic_id == 0)
bbf33d1d 5365 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
c0c050c5 5366
bbf33d1d
EP
5367 resp = hwrm_req_hold(bp, req);
5368 rc = hwrm_req_send(bp, req);
c0c050c5 5369 if (!rc)
44c6f72a 5370 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
bbf33d1d 5371 hwrm_req_drop(bp, req);
c0c050c5
MC
5372 return rc;
5373}
5374
8fdefd63
MC
5375static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5376{
bbf33d1d
EP
5377 struct hwrm_vnic_qcaps_output *resp;
5378 struct hwrm_vnic_qcaps_input *req;
8fdefd63
MC
5379 int rc;
5380
fbbdbc64 5381 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
ba642ab7 5382 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
8fdefd63
MC
5383 if (bp->hwrm_spec_code < 0x10600)
5384 return 0;
5385
bbf33d1d
EP
5386 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5387 if (rc)
5388 return rc;
5389
5390 resp = hwrm_req_hold(bp, req);
5391 rc = hwrm_req_send(bp, req);
8fdefd63 5392 if (!rc) {
abe93ad2
MC
5393 u32 flags = le32_to_cpu(resp->flags);
5394
41e8d798
MC
5395 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5396 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
8fdefd63 5397 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
abe93ad2
MC
5398 if (flags &
5399 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5400 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
1da63ddd
EP
5401
5402 /* Older P5 fw before EXT_HW_STATS support did not set
5403 * VLAN_STRIP_CAP properly.
5404 */
5405 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
9d6b648c 5406 (BNXT_CHIP_P5_THOR(bp) &&
1da63ddd
EP
5407 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5408 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
79632e9b 5409 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
9d6b648c
MC
5410 if (bp->max_tpa_v2) {
5411 if (BNXT_CHIP_P5_THOR(bp))
5412 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5413 else
5414 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5415 }
8fdefd63 5416 }
bbf33d1d 5417 hwrm_req_drop(bp, req);
8fdefd63
MC
5418 return rc;
5419}
5420
c0c050c5
MC
5421static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5422{
bbf33d1d
EP
5423 struct hwrm_ring_grp_alloc_output *resp;
5424 struct hwrm_ring_grp_alloc_input *req;
5425 int rc;
c0c050c5 5426 u16 i;
c0c050c5 5427
44c6f72a
MC
5428 if (bp->flags & BNXT_FLAG_CHIP_P5)
5429 return 0;
5430
bbf33d1d
EP
5431 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5432 if (rc)
5433 return rc;
5434
5435 resp = hwrm_req_hold(bp, req);
c0c050c5 5436 for (i = 0; i < bp->rx_nr_rings; i++) {
b81a90d3 5437 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
c0c050c5 5438
bbf33d1d
EP
5439 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5440 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5441 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5442 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
c0c050c5 5443
bbf33d1d 5444 rc = hwrm_req_send(bp, req);
c0c050c5 5445
c0c050c5
MC
5446 if (rc)
5447 break;
5448
b81a90d3
MC
5449 bp->grp_info[grp_idx].fw_grp_id =
5450 le32_to_cpu(resp->ring_group_id);
c0c050c5 5451 }
bbf33d1d 5452 hwrm_req_drop(bp, req);
c0c050c5
MC
5453 return rc;
5454}
5455
3d061591 5456static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
c0c050c5 5457{
bbf33d1d 5458 struct hwrm_ring_grp_free_input *req;
c0c050c5 5459 u16 i;
c0c050c5 5460
44c6f72a 5461 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
3d061591 5462 return;
c0c050c5 5463
bbf33d1d
EP
5464 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5465 return;
c0c050c5 5466
bbf33d1d 5467 hwrm_req_hold(bp, req);
c0c050c5
MC
5468 for (i = 0; i < bp->cp_nr_rings; i++) {
5469 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5470 continue;
bbf33d1d 5471 req->ring_group_id =
c0c050c5
MC
5472 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5473
bbf33d1d 5474 hwrm_req_send(bp, req);
c0c050c5
MC
5475 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5476 }
bbf33d1d 5477 hwrm_req_drop(bp, req);
c0c050c5
MC
5478}
5479
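The ring-group alloc/free helpers above also show the "hold once, send many" variant of the HWRM API: a held request can be re-sent in a loop with per-iteration fields patched in between sends, and only the final hwrm_req_drop() releases it. A stripped-down sketch of that reuse, modeled on the free path (the bnxt_example_* helper is illustrative only):

static void bnxt_example_free_ring_grps(struct bnxt *bp, __le32 *grp_ids, int n)
{
	struct hwrm_ring_grp_free_input *req;
	int i;

	if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
		return;

	hwrm_req_hold(bp, req);			/* allow repeated sends */
	for (i = 0; i < n; i++) {
		req->ring_group_id = grp_ids[i];
		hwrm_req_send(bp, req);
	}
	hwrm_req_drop(bp, req);
}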
5480static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5481 struct bnxt_ring_struct *ring,
9899bb59 5482 u32 ring_type, u32 map_index)
c0c050c5 5483{
bbf33d1d
EP
5484 struct hwrm_ring_alloc_output *resp;
5485 struct hwrm_ring_alloc_input *req;
6fe19886 5486 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
9899bb59 5487 struct bnxt_ring_grp_info *grp_info;
bbf33d1d 5488 int rc, err = 0;
c0c050c5
MC
5489 u16 ring_id;
5490
bbf33d1d
EP
5491 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5492 if (rc)
5493 goto exit;
c0c050c5 5494
bbf33d1d 5495 req->enables = 0;
6fe19886 5496 if (rmem->nr_pages > 1) {
bbf33d1d 5497 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
c0c050c5 5498 /* Page size is in log2 units */
bbf33d1d
EP
5499 req->page_size = BNXT_PAGE_SHIFT;
5500 req->page_tbl_depth = 1;
c0c050c5 5501 } else {
bbf33d1d 5502 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
c0c050c5 5503 }
bbf33d1d 5504 req->fbo = 0;
c0c050c5 5505 /* Association of ring index with doorbell index and MSIX number */
bbf33d1d 5506 req->logical_id = cpu_to_le16(map_index);
c0c050c5
MC
5507
5508 switch (ring_type) {
2c61d211
MC
5509 case HWRM_RING_ALLOC_TX: {
5510 struct bnxt_tx_ring_info *txr;
5511
5512 txr = container_of(ring, struct bnxt_tx_ring_info,
5513 tx_ring_struct);
bbf33d1d 5514 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
c0c050c5 5515 /* Association of transmit ring with completion ring */
9899bb59 5516 grp_info = &bp->grp_info[ring->grp_idx];
bbf33d1d
EP
5517 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5518 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5519 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5520 req->queue_id = cpu_to_le16(ring->queue_id);
c0c050c5 5521 break;
2c61d211 5522 }
c0c050c5 5523 case HWRM_RING_ALLOC_RX:
bbf33d1d
EP
5524 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5525 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
23aefdd7
MC
5526 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5527 u16 flags = 0;
5528
5529 /* Association of rx ring with stats context */
5530 grp_info = &bp->grp_info[ring->grp_idx];
bbf33d1d
EP
5531 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5532 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5533 req->enables |= cpu_to_le32(
23aefdd7
MC
5534 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5535 if (NET_IP_ALIGN == 2)
5536 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
bbf33d1d 5537 req->flags = cpu_to_le16(flags);
23aefdd7 5538 }
c0c050c5
MC
5539 break;
5540 case HWRM_RING_ALLOC_AGG:
23aefdd7 5541 if (bp->flags & BNXT_FLAG_CHIP_P5) {
bbf33d1d 5542 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
23aefdd7
MC
5543 /* Association of agg ring with rx ring */
5544 grp_info = &bp->grp_info[ring->grp_idx];
bbf33d1d
EP
5545 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5546 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5547 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5548 req->enables |= cpu_to_le32(
23aefdd7
MC
5549 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5550 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5551 } else {
bbf33d1d 5552 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
23aefdd7 5553 }
bbf33d1d 5554 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
c0c050c5
MC
5555 break;
5556 case HWRM_RING_ALLOC_CMPL:
bbf33d1d
EP
5557 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5558 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
23aefdd7
MC
5559 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5560 /* Association of cp ring with nq */
5561 grp_info = &bp->grp_info[map_index];
bbf33d1d
EP
5562 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5563 req->cq_handle = cpu_to_le64(ring->handle);
5564 req->enables |= cpu_to_le32(
23aefdd7
MC
5565 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5566 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
bbf33d1d 5567 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
23aefdd7
MC
5568 }
5569 break;
5570 case HWRM_RING_ALLOC_NQ:
bbf33d1d
EP
5571 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5572 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
c0c050c5 5573 if (bp->flags & BNXT_FLAG_USING_MSIX)
bbf33d1d 5574 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
c0c050c5
MC
5575 break;
5576 default:
5577 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5578 ring_type);
5579 return -1;
5580 }
5581
bbf33d1d
EP
5582 resp = hwrm_req_hold(bp, req);
5583 rc = hwrm_req_send(bp, req);
c0c050c5
MC
5584 err = le16_to_cpu(resp->error_code);
5585 ring_id = le16_to_cpu(resp->ring_id);
bbf33d1d 5586 hwrm_req_drop(bp, req);
c0c050c5 5587
bbf33d1d 5588exit:
c0c050c5 5589 if (rc || err) {
2727c888
MC
5590 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5591 ring_type, rc, err);
5592 return -EIO;
c0c050c5
MC
5593 }
5594 ring->fw_ring_id = ring_id;
5595 return rc;
5596}
5597
486b5c22
MC
5598static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5599{
5600 int rc;
5601
5602 if (BNXT_PF(bp)) {
bbf33d1d 5603 struct hwrm_func_cfg_input *req;
486b5c22 5604
bbf33d1d
EP
5605 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5606 if (rc)
5607 return rc;
5608
5609 req->fid = cpu_to_le16(0xffff);
5610 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5611 req->async_event_cr = cpu_to_le16(idx);
5612 return hwrm_req_send(bp, req);
486b5c22 5613 } else {
bbf33d1d
EP
5614 struct hwrm_func_vf_cfg_input *req;
5615
5616 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5617 if (rc)
5618 return rc;
486b5c22 5619
bbf33d1d 5620 req->enables =
486b5c22 5621 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
bbf33d1d
EP
5622 req->async_event_cr = cpu_to_le16(idx);
5623 return hwrm_req_send(bp, req);
486b5c22 5624 }
486b5c22
MC
5625}
5626
697197e5
MC
5627static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5628 u32 map_idx, u32 xid)
5629{
5630 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5631 if (BNXT_PF(bp))
ebdf73dc 5632 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
697197e5 5633 else
ebdf73dc 5634 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
697197e5
MC
5635 switch (ring_type) {
5636 case HWRM_RING_ALLOC_TX:
5637 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5638 break;
5639 case HWRM_RING_ALLOC_RX:
5640 case HWRM_RING_ALLOC_AGG:
5641 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5642 break;
5643 case HWRM_RING_ALLOC_CMPL:
5644 db->db_key64 = DBR_PATH_L2;
5645 break;
5646 case HWRM_RING_ALLOC_NQ:
5647 db->db_key64 = DBR_PATH_L2;
5648 break;
5649 }
5650 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5651 } else {
5652 db->doorbell = bp->bar1 + map_idx * 0x80;
5653 switch (ring_type) {
5654 case HWRM_RING_ALLOC_TX:
5655 db->db_key32 = DB_KEY_TX;
5656 break;
5657 case HWRM_RING_ALLOC_RX:
5658 case HWRM_RING_ALLOC_AGG:
5659 db->db_key32 = DB_KEY_RX;
5660 break;
5661 case HWRM_RING_ALLOC_CMPL:
5662 db->db_key32 = DB_KEY_CP;
5663 break;
5664 }
5665 }
5666}
5667
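On P5 chips the 64-bit doorbell key built above is the path/type selector OR'd with the ring's firmware id shifted into the XID field, while legacy chips use a fixed 32-bit key and a 0x80 doorbell stride per MSI-X index. A small sketch of the P5 TX case, assuming the DBR_* constants from bnxt.h (illustrative helper only):

static u64 bnxt_example_tx_db_key(u32 fw_ring_id)
{
	/* e.g. ring id 5 -> DBR_PATH_L2 | DBR_TYPE_SQ | (5ULL << DBR_XID_SFT) */
	return DBR_PATH_L2 | DBR_TYPE_SQ | ((u64)fw_ring_id << DBR_XID_SFT);
}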
c0c050c5
MC
5668static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5669{
e8f267b0 5670 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
c0c050c5 5671 int i, rc = 0;
697197e5 5672 u32 type;
c0c050c5 5673
23aefdd7
MC
5674 if (bp->flags & BNXT_FLAG_CHIP_P5)
5675 type = HWRM_RING_ALLOC_NQ;
5676 else
5677 type = HWRM_RING_ALLOC_CMPL;
edd0c2cc
MC
5678 for (i = 0; i < bp->cp_nr_rings; i++) {
5679 struct bnxt_napi *bnapi = bp->bnapi[i];
5680 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5681 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9899bb59 5682 u32 map_idx = ring->map_idx;
5e66e35a 5683 unsigned int vector;
c0c050c5 5684
5e66e35a
MC
5685 vector = bp->irq_tbl[map_idx].vector;
5686 disable_irq_nosync(vector);
697197e5 5687 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5e66e35a
MC
5688 if (rc) {
5689 enable_irq(vector);
edd0c2cc 5690 goto err_out;
5e66e35a 5691 }
697197e5
MC
5692 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5693 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5e66e35a 5694 enable_irq(vector);
edd0c2cc 5695 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
486b5c22
MC
5696
5697 if (!i) {
5698 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5699 if (rc)
5700 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5701 }
c0c050c5
MC
5702 }
5703
697197e5 5704 type = HWRM_RING_ALLOC_TX;
edd0c2cc 5705 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5706 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3e08b184
MC
5707 struct bnxt_ring_struct *ring;
5708 u32 map_idx;
c0c050c5 5709
3e08b184
MC
5710 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5711 struct bnxt_napi *bnapi = txr->bnapi;
5712 struct bnxt_cp_ring_info *cpr, *cpr2;
5713 u32 type2 = HWRM_RING_ALLOC_CMPL;
5714
5715 cpr = &bnapi->cp_ring;
5716 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5717 ring = &cpr2->cp_ring_struct;
5718 ring->handle = BNXT_TX_HDL;
5719 map_idx = bnapi->index;
5720 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5721 if (rc)
5722 goto err_out;
5723 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5724 ring->fw_ring_id);
5725 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5726 }
5727 ring = &txr->tx_ring_struct;
5728 map_idx = i;
697197e5 5729 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5730 if (rc)
5731 goto err_out;
697197e5 5732 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
c0c050c5
MC
5733 }
5734
697197e5 5735 type = HWRM_RING_ALLOC_RX;
edd0c2cc 5736 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5737 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5738 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3e08b184
MC
5739 struct bnxt_napi *bnapi = rxr->bnapi;
5740 u32 map_idx = bnapi->index;
c0c050c5 5741
697197e5 5742 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5743 if (rc)
5744 goto err_out;
697197e5 5745 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
e8f267b0
MC
5746 /* If we have agg rings, post agg buffers first. */
5747 if (!agg_rings)
5748 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5749 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3e08b184
MC
5750 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5751 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5752 u32 type2 = HWRM_RING_ALLOC_CMPL;
5753 struct bnxt_cp_ring_info *cpr2;
5754
5755 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5756 ring = &cpr2->cp_ring_struct;
5757 ring->handle = BNXT_RX_HDL;
5758 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5759 if (rc)
5760 goto err_out;
5761 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5762 ring->fw_ring_id);
5763 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5764 }
c0c050c5
MC
5765 }
5766
e8f267b0 5767 if (agg_rings) {
697197e5 5768 type = HWRM_RING_ALLOC_AGG;
c0c050c5 5769 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5770 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
5771 struct bnxt_ring_struct *ring =
5772 &rxr->rx_agg_ring_struct;
9899bb59 5773 u32 grp_idx = ring->grp_idx;
b81a90d3 5774 u32 map_idx = grp_idx + bp->rx_nr_rings;
c0c050c5 5775
697197e5 5776 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
c0c050c5
MC
5777 if (rc)
5778 goto err_out;
5779
697197e5
MC
5780 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5781 ring->fw_ring_id);
5782 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 5783 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5784 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
5785 }
5786 }
5787err_out:
5788 return rc;
5789}
5790
5791static int hwrm_ring_free_send_msg(struct bnxt *bp,
5792 struct bnxt_ring_struct *ring,
5793 u32 ring_type, int cmpl_ring_id)
5794{
bbf33d1d
EP
5795 struct hwrm_ring_free_output *resp;
5796 struct hwrm_ring_free_input *req;
5797 u16 error_code = 0;
c0c050c5 5798 int rc;
c0c050c5 5799
b340dc68 5800 if (BNXT_NO_FW_ACCESS(bp))
b4fff207
MC
5801 return 0;
5802
bbf33d1d
EP
5803 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
5804 if (rc)
5805 goto exit;
c0c050c5 5806
bbf33d1d
EP
5807 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
5808 req->ring_type = ring_type;
5809 req->ring_id = cpu_to_le16(ring->fw_ring_id);
c0c050c5 5810
bbf33d1d
EP
5811 resp = hwrm_req_hold(bp, req);
5812 rc = hwrm_req_send(bp, req);
5813 error_code = le16_to_cpu(resp->error_code);
5814 hwrm_req_drop(bp, req);
5815exit:
c0c050c5 5816 if (rc || error_code) {
2727c888
MC
5817 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5818 ring_type, rc, error_code);
5819 return -EIO;
c0c050c5
MC
5820 }
5821 return 0;
5822}
5823
edd0c2cc 5824static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 5825{
23aefdd7 5826 u32 type;
edd0c2cc 5827 int i;
c0c050c5
MC
5828
5829 if (!bp->bnapi)
edd0c2cc 5830 return;
c0c050c5 5831
edd0c2cc 5832 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5833 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 5834 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
edd0c2cc
MC
5835
5836 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5837 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5838
edd0c2cc
MC
5839 hwrm_ring_free_send_msg(bp, ring,
5840 RING_FREE_REQ_RING_TYPE_TX,
5841 close_path ? cmpl_ring_id :
5842 INVALID_HW_RING_ID);
5843 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5844 }
5845 }
5846
edd0c2cc 5847 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5848 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5849 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3 5850 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5851
5852 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5853 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5854
edd0c2cc
MC
5855 hwrm_ring_free_send_msg(bp, ring,
5856 RING_FREE_REQ_RING_TYPE_RX,
5857 close_path ? cmpl_ring_id :
5858 INVALID_HW_RING_ID);
5859 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5860 bp->grp_info[grp_idx].rx_fw_ring_id =
5861 INVALID_HW_RING_ID;
c0c050c5
MC
5862 }
5863 }
5864
23aefdd7
MC
5865 if (bp->flags & BNXT_FLAG_CHIP_P5)
5866 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5867 else
5868 type = RING_FREE_REQ_RING_TYPE_RX;
edd0c2cc 5869 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5870 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5871 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
b81a90d3 5872 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5873
5874 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5875 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5876
23aefdd7 5877 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5878 close_path ? cmpl_ring_id :
5879 INVALID_HW_RING_ID);
5880 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5881 bp->grp_info[grp_idx].agg_fw_ring_id =
5882 INVALID_HW_RING_ID;
c0c050c5
MC
5883 }
5884 }
5885
9d8bc097
MC
5886 /* The completion rings are about to be freed. After that the
5887 * IRQ doorbell will not work anymore. So we need to disable
5888 * IRQ here.
5889 */
5890 bnxt_disable_int_sync(bp);
5891
23aefdd7
MC
5892 if (bp->flags & BNXT_FLAG_CHIP_P5)
5893 type = RING_FREE_REQ_RING_TYPE_NQ;
5894 else
5895 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
edd0c2cc
MC
5896 for (i = 0; i < bp->cp_nr_rings; i++) {
5897 struct bnxt_napi *bnapi = bp->bnapi[i];
5898 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3e08b184
MC
5899 struct bnxt_ring_struct *ring;
5900 int j;
edd0c2cc 5901
3e08b184
MC
5902 for (j = 0; j < 2; j++) {
5903 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5904
5905 if (cpr2) {
5906 ring = &cpr2->cp_ring_struct;
5907 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5908 continue;
5909 hwrm_ring_free_send_msg(bp, ring,
5910 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5911 INVALID_HW_RING_ID);
5912 ring->fw_ring_id = INVALID_HW_RING_ID;
5913 }
5914 }
5915 ring = &cpr->cp_ring_struct;
edd0c2cc 5916 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
23aefdd7 5917 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5918 INVALID_HW_RING_ID);
5919 ring->fw_ring_id = INVALID_HW_RING_ID;
5920 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5921 }
5922 }
c0c050c5
MC
5923}
5924
41e8d798
MC
5925static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5926 bool shared);
5927
674f50a5
MC
5928static int bnxt_hwrm_get_rings(struct bnxt *bp)
5929{
674f50a5 5930 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
bbf33d1d
EP
5931 struct hwrm_func_qcfg_output *resp;
5932 struct hwrm_func_qcfg_input *req;
674f50a5
MC
5933 int rc;
5934
5935 if (bp->hwrm_spec_code < 0x10601)
5936 return 0;
5937
bbf33d1d
EP
5938 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
5939 if (rc)
5940 return rc;
5941
5942 req->fid = cpu_to_le16(0xffff);
5943 resp = hwrm_req_hold(bp, req);
5944 rc = hwrm_req_send(bp, req);
674f50a5 5945 if (rc) {
bbf33d1d 5946 hwrm_req_drop(bp, req);
d4f1420d 5947 return rc;
674f50a5
MC
5948 }
5949
5950 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
f1ca94de 5951 if (BNXT_NEW_RM(bp)) {
674f50a5
MC
5952 u16 cp, stats;
5953
5954 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5955 hw_resc->resv_hw_ring_grps =
5956 le32_to_cpu(resp->alloc_hw_ring_grps);
5957 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5958 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5959 stats = le16_to_cpu(resp->alloc_stat_ctx);
75720e63 5960 hw_resc->resv_irqs = cp;
41e8d798
MC
5961 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5962 int rx = hw_resc->resv_rx_rings;
5963 int tx = hw_resc->resv_tx_rings;
5964
5965 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5966 rx >>= 1;
5967 if (cp < (rx + tx)) {
5968 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5969 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5970 rx <<= 1;
5971 hw_resc->resv_rx_rings = rx;
5972 hw_resc->resv_tx_rings = tx;
5973 }
75720e63 5974 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
41e8d798
MC
5975 hw_resc->resv_hw_ring_grps = rx;
5976 }
674f50a5 5977 hw_resc->resv_cp_rings = cp;
780baad4 5978 hw_resc->resv_stat_ctxs = stats;
674f50a5 5979 }
bbf33d1d 5980 hwrm_req_drop(bp, req);
674f50a5
MC
5981 return 0;
5982}
5983
391be5c2
MC
5984int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5985{
bbf33d1d
EP
5986 struct hwrm_func_qcfg_output *resp;
5987 struct hwrm_func_qcfg_input *req;
391be5c2
MC
5988 int rc;
5989
5990 if (bp->hwrm_spec_code < 0x10601)
5991 return 0;
5992
bbf33d1d
EP
5993 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
5994 if (rc)
5995 return rc;
5996
5997 req->fid = cpu_to_le16(fid);
5998 resp = hwrm_req_hold(bp, req);
5999 rc = hwrm_req_send(bp, req);
391be5c2
MC
6000 if (!rc)
6001 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6002
bbf33d1d 6003 hwrm_req_drop(bp, req);
391be5c2
MC
6004 return rc;
6005}
6006
41e8d798
MC
6007static bool bnxt_rfs_supported(struct bnxt *bp);
6008
bbf33d1d
EP
6009static struct hwrm_func_cfg_input *
6010__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6011 int ring_grps, int cp_rings, int stats, int vnics)
391be5c2 6012{
bbf33d1d 6013 struct hwrm_func_cfg_input *req;
674f50a5 6014 u32 enables = 0;
391be5c2 6015
bbf33d1d
EP
6016 if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6017 return NULL;
6018
4ed50ef4 6019 req->fid = cpu_to_le16(0xffff);
674f50a5 6020 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4ed50ef4 6021 req->num_tx_rings = cpu_to_le16(tx_rings);
f1ca94de 6022 if (BNXT_NEW_RM(bp)) {
674f50a5 6023 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
3f93cd3f 6024 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
6025 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6026 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6027 enables |= tx_rings + ring_grps ?
3f93cd3f 6028 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6029 enables |= rx_rings ?
6030 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6031 } else {
6032 enables |= cp_rings ?
3f93cd3f 6033 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6034 enables |= ring_grps ?
6035 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6036 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6037 }
dbe80d44 6038 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
674f50a5 6039
4ed50ef4 6040 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
6041 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6042 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6043 req->num_msix = cpu_to_le16(cp_rings);
6044 req->num_rsscos_ctxs =
6045 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6046 } else {
6047 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6048 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6049 req->num_rsscos_ctxs = cpu_to_le16(1);
6050 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6051 bnxt_rfs_supported(bp))
6052 req->num_rsscos_ctxs =
6053 cpu_to_le16(ring_grps + 1);
6054 }
780baad4 6055 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4 6056 req->num_vnics = cpu_to_le16(vnics);
674f50a5 6057 }
4ed50ef4 6058 req->enables = cpu_to_le32(enables);
bbf33d1d 6059 return req;
4ed50ef4
MC
6060}
6061
bbf33d1d
EP
6062static struct hwrm_func_vf_cfg_input *
6063__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6064 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4 6065{
bbf33d1d 6066 struct hwrm_func_vf_cfg_input *req;
4ed50ef4
MC
6067 u32 enables = 0;
6068
bbf33d1d
EP
6069 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6070 return NULL;
6071
4ed50ef4 6072 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
41e8d798
MC
6073 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6074 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
3f93cd3f 6075 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
6076 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6077 enables |= tx_rings + ring_grps ?
3f93cd3f 6078 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6079 } else {
6080 enables |= cp_rings ?
3f93cd3f 6081 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
6082 enables |= ring_grps ?
6083 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6084 }
4ed50ef4 6085 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
41e8d798 6086 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
4ed50ef4 6087
41e8d798 6088 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
4ed50ef4
MC
6089 req->num_tx_rings = cpu_to_le16(tx_rings);
6090 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
6091 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6092 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6093 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6094 } else {
6095 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6096 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6097 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6098 }
780baad4 6099 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4
MC
6100 req->num_vnics = cpu_to_le16(vnics);
6101
6102 req->enables = cpu_to_le32(enables);
bbf33d1d 6103 return req;
4ed50ef4
MC
6104}
6105
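Both reservation helpers size the P5 RSS/COS context pool as one context per 64 ring groups. A quick worked example under that same rounding (illustrative helper only, not part of the driver):

static u16 bnxt_example_p5_rss_ctxs(int ring_grps)
{
	/* 64 groups -> 1 context, 65 -> 2, 128 -> 2, 129 -> 3 */
	return DIV_ROUND_UP(ring_grps, 64);
}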
6106static int
6107bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 6108 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4 6109{
bbf33d1d 6110 struct hwrm_func_cfg_input *req;
4ed50ef4
MC
6111 int rc;
6112
bbf33d1d
EP
6113 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6114 cp_rings, stats, vnics);
6115 if (!req)
6116 return -ENOMEM;
6117
6118 if (!req->enables) {
6119 hwrm_req_drop(bp, req);
391be5c2 6120 return 0;
bbf33d1d 6121 }
391be5c2 6122
bbf33d1d 6123 rc = hwrm_req_send(bp, req);
674f50a5 6124 if (rc)
d4f1420d 6125 return rc;
674f50a5
MC
6126
6127 if (bp->hwrm_spec_code < 0x10601)
6128 bp->hw_resc.resv_tx_rings = tx_rings;
6129
9f90445c 6130 return bnxt_hwrm_get_rings(bp);
674f50a5
MC
6131}
6132
6133static int
6134bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 6135 int ring_grps, int cp_rings, int stats, int vnics)
674f50a5 6136{
bbf33d1d 6137 struct hwrm_func_vf_cfg_input *req;
674f50a5
MC
6138 int rc;
6139
f1ca94de 6140 if (!BNXT_NEW_RM(bp)) {
674f50a5 6141 bp->hw_resc.resv_tx_rings = tx_rings;
391be5c2 6142 return 0;
674f50a5 6143 }
391be5c2 6144
bbf33d1d
EP
6145 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6146 cp_rings, stats, vnics);
6147 if (!req)
6148 return -ENOMEM;
6149
6150 rc = hwrm_req_send(bp, req);
674f50a5 6151 if (rc)
d4f1420d 6152 return rc;
674f50a5 6153
9f90445c 6154 return bnxt_hwrm_get_rings(bp);
674f50a5
MC
6155}
6156
6157static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
780baad4 6158 int cp, int stat, int vnic)
674f50a5
MC
6159{
6160 if (BNXT_PF(bp))
780baad4
VV
6161 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6162 vnic);
674f50a5 6163 else
780baad4
VV
6164 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6165 vnic);
674f50a5
MC
6166}
6167
b16b6891 6168int bnxt_nq_rings_in_use(struct bnxt *bp)
08654eb2
MC
6169{
6170 int cp = bp->cp_nr_rings;
6171 int ulp_msix, ulp_base;
6172
6173 ulp_msix = bnxt_get_ulp_msix_num(bp);
6174 if (ulp_msix) {
6175 ulp_base = bnxt_get_ulp_msix_base(bp);
6176 cp += ulp_msix;
6177 if ((ulp_base + ulp_msix) > cp)
6178 cp = ulp_base + ulp_msix;
6179 }
6180 return cp;
6181}
6182
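bnxt_nq_rings_in_use() counts the L2 notification queues plus any MSI-X vectors carved out for the RDMA (ULP) driver; if the ULP block is placed beyond the L2 range, its end defines the total. A worked sketch of the same arithmetic, with the example numbers only assumed for illustration:

static int bnxt_example_nq_in_use(int l2_nqs, int ulp_base, int ulp_msix)
{
	int nq = l2_nqs;

	if (ulp_msix) {
		nq += ulp_msix;			  /* e.g. 8 L2 + 4 ULP = 12 */
		if (ulp_base + ulp_msix > nq)	  /* ULP starting at vector 10 */
			nq = ulp_base + ulp_msix; /* -> 10 + 4 = 14 */
	}
	return nq;
}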
c0b8cda0
MC
6183static int bnxt_cp_rings_in_use(struct bnxt *bp)
6184{
6185 int cp;
6186
6187 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6188 return bnxt_nq_rings_in_use(bp);
6189
6190 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6191 return cp;
6192}
6193
780baad4
VV
6194static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6195{
d77b1ad8
MC
6196 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6197 int cp = bp->cp_nr_rings;
6198
6199 if (!ulp_stat)
6200 return cp;
6201
6202 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6203 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6204
6205 return cp + ulp_stat;
780baad4
VV
6206}
6207
b43b9f53
MC
6208/* Check if a default RSS map needs to be set up. This function is only
6209 * used on older firmware that does not require reserving RX rings.
6210 */
6211static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6212{
6213 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6214
6215 /* The RSS map is valid for RX rings set to resv_rx_rings */
6216 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6217 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6218 if (!netif_is_rxfh_configured(bp->dev))
6219 bnxt_set_dflt_rss_indir_tbl(bp);
6220 }
6221}
6222
4e41dc5d
MC
6223static bool bnxt_need_reserve_rings(struct bnxt *bp)
6224{
6225 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
fbcfc8e4 6226 int cp = bnxt_cp_rings_in_use(bp);
c0b8cda0 6227 int nq = bnxt_nq_rings_in_use(bp);
780baad4 6228 int rx = bp->rx_nr_rings, stat;
4e41dc5d
MC
6229 int vnic = 1, grp = rx;
6230
b43b9f53
MC
6231 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6232 bp->hwrm_spec_code >= 0x10601)
4e41dc5d
MC
6233 return true;
6234
b43b9f53
MC
6235 /* Old firmware does not need RX ring reservations but we still
6236 * need to set up a default RSS map when needed. With new firmware
6237 * we go through RX ring reservations first and then set up the
6238 * RSS map for the successfully reserved RX rings when needed.
6239 */
6240 if (!BNXT_NEW_RM(bp)) {
6241 bnxt_check_rss_tbl_no_rmgr(bp);
6242 return false;
6243 }
41e8d798 6244 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
4e41dc5d
MC
6245 vnic = rx + 1;
6246 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6247 rx <<= 1;
780baad4 6248 stat = bnxt_get_func_stat_ctxs(bp);
b43b9f53
MC
6249 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6250 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6251 (hw_resc->resv_hw_ring_grps != grp &&
6252 !(bp->flags & BNXT_FLAG_CHIP_P5)))
4e41dc5d 6253 return true;
01989c6b
MC
6254 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6255 hw_resc->resv_irqs != nq)
6256 return true;
4e41dc5d
MC
6257 return false;
6258}
6259
674f50a5
MC
6260static int __bnxt_reserve_rings(struct bnxt *bp)
6261{
6262 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
c0b8cda0 6263 int cp = bnxt_nq_rings_in_use(bp);
674f50a5
MC
6264 int tx = bp->tx_nr_rings;
6265 int rx = bp->rx_nr_rings;
674f50a5 6266 int grp, rx_rings, rc;
780baad4 6267 int vnic = 1, stat;
674f50a5 6268 bool sh = false;
674f50a5 6269
4e41dc5d 6270 if (!bnxt_need_reserve_rings(bp))
674f50a5
MC
6271 return 0;
6272
6273 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6274 sh = true;
41e8d798 6275 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
674f50a5
MC
6276 vnic = rx + 1;
6277 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6278 rx <<= 1;
674f50a5 6279 grp = bp->rx_nr_rings;
780baad4 6280 stat = bnxt_get_func_stat_ctxs(bp);
674f50a5 6281
780baad4 6282 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
391be5c2
MC
6283 if (rc)
6284 return rc;
6285
674f50a5 6286 tx = hw_resc->resv_tx_rings;
f1ca94de 6287 if (BNXT_NEW_RM(bp)) {
674f50a5 6288 rx = hw_resc->resv_rx_rings;
c0b8cda0 6289 cp = hw_resc->resv_irqs;
674f50a5
MC
6290 grp = hw_resc->resv_hw_ring_grps;
6291 vnic = hw_resc->resv_vnics;
780baad4 6292 stat = hw_resc->resv_stat_ctxs;
674f50a5
MC
6293 }
6294
6295 rx_rings = rx;
6296 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6297 if (rx >= 2) {
6298 rx_rings = rx >> 1;
6299 } else {
6300 if (netif_running(bp->dev))
6301 return -ENOMEM;
6302
6303 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6304 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6305 bp->dev->hw_features &= ~NETIF_F_LRO;
6306 bp->dev->features &= ~NETIF_F_LRO;
6307 bnxt_set_ring_params(bp);
6308 }
6309 }
6310 rx_rings = min_t(int, rx_rings, grp);
780baad4
VV
6311 cp = min_t(int, cp, bp->cp_nr_rings);
6312 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6313 stat -= bnxt_get_ulp_stat_ctxs(bp);
6314 cp = min_t(int, cp, stat);
674f50a5
MC
6315 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6316 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6317 rx = rx_rings << 1;
6318 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6319 bp->tx_nr_rings = tx;
bd3191b5
MC
6320
6321 /* If we cannot reserve all the RX rings, reset the RSS map only
6322 * if absolutely necessary
6323 */
6324 if (rx_rings != bp->rx_nr_rings) {
6325 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6326 rx_rings, bp->rx_nr_rings);
6327 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6328 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6329 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6330 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6331 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6332 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6333 }
6334 }
674f50a5
MC
6335 bp->rx_nr_rings = rx_rings;
6336 bp->cp_nr_rings = cp;
6337
780baad4 6338 if (!tx || !rx || !cp || !grp || !vnic || !stat)
674f50a5
MC
6339 return -ENOMEM;
6340
5fa65524
EP
6341 if (!netif_is_rxfh_configured(bp->dev))
6342 bnxt_set_dflt_rss_indir_tbl(bp);
6343
391be5c2
MC
6344 return rc;
6345}
6346
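When aggregation rings are enabled, every RX queue consumes two hardware RX rings, which is why the reservation path above doubles rx before calling firmware and halves whatever the firmware grants. A worked example, assuming aggregation stays enabled (illustrative helper only):

static int bnxt_example_agg_rx_queues(int granted_hw_rx_rings)
{
	/* 8 queues are requested as 8 << 1 = 16 rings; a grant of 14 -> 7 queues */
	return granted_hw_rx_rings >> 1;
}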
8f23d638 6347static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6348 int ring_grps, int cp_rings, int stats,
6349 int vnics)
98fdbe73 6350{
bbf33d1d 6351 struct hwrm_func_vf_cfg_input *req;
6fc2ffdf 6352 u32 flags;
98fdbe73 6353
f1ca94de 6354 if (!BNXT_NEW_RM(bp))
98fdbe73
MC
6355 return 0;
6356
bbf33d1d
EP
6357 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6358 cp_rings, stats, vnics);
8f23d638
MC
6359 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6360 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6361 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638 6362 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
41e8d798
MC
6363 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6364 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6365 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6366 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8f23d638 6367
bbf33d1d
EP
6368 req->flags = cpu_to_le32(flags);
6369 return hwrm_req_send_silent(bp, req);
8f23d638
MC
6370}
6371
6372static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6373 int ring_grps, int cp_rings, int stats,
6374 int vnics)
8f23d638 6375{
bbf33d1d 6376 struct hwrm_func_cfg_input *req;
6fc2ffdf 6377 u32 flags;
98fdbe73 6378
bbf33d1d
EP
6379 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6380 cp_rings, stats, vnics);
8f23d638 6381 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
41e8d798 6382 if (BNXT_NEW_RM(bp)) {
8f23d638
MC
6383 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6384 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638
MC
6385 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6386 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
41e8d798 6387 if (bp->flags & BNXT_FLAG_CHIP_P5)
0b815023
MC
6388 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6389 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
41e8d798
MC
6390 else
6391 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6392 }
6fc2ffdf 6393
bbf33d1d
EP
6394 req->flags = cpu_to_le32(flags);
6395 return hwrm_req_send_silent(bp, req);
98fdbe73
MC
6396}
6397
8f23d638 6398static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6399 int ring_grps, int cp_rings, int stats,
6400 int vnics)
8f23d638
MC
6401{
6402 if (bp->hwrm_spec_code < 0x10801)
6403 return 0;
6404
6405 if (BNXT_PF(bp))
6406 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
780baad4
VV
6407 ring_grps, cp_rings, stats,
6408 vnics);
8f23d638
MC
6409
6410 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
780baad4 6411 cp_rings, stats, vnics);
8f23d638
MC
6412}
6413
74706afa
MC
6414static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6415{
74706afa 6416 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
bbf33d1d
EP
6417 struct hwrm_ring_aggint_qcaps_output *resp;
6418 struct hwrm_ring_aggint_qcaps_input *req;
74706afa
MC
6419 int rc;
6420
6421 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6422 coal_cap->num_cmpl_dma_aggr_max = 63;
6423 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6424 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6425 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6426 coal_cap->int_lat_tmr_min_max = 65535;
6427 coal_cap->int_lat_tmr_max_max = 65535;
6428 coal_cap->num_cmpl_aggr_int_max = 65535;
6429 coal_cap->timer_units = 80;
6430
6431 if (bp->hwrm_spec_code < 0x10902)
6432 return;
6433
bbf33d1d
EP
6434 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6435 return;
6436
6437 resp = hwrm_req_hold(bp, req);
6438 rc = hwrm_req_send_silent(bp, req);
74706afa
MC
6439 if (!rc) {
6440 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
58590c8d 6441 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
74706afa
MC
6442 coal_cap->num_cmpl_dma_aggr_max =
6443 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6444 coal_cap->num_cmpl_dma_aggr_during_int_max =
6445 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6446 coal_cap->cmpl_aggr_dma_tmr_max =
6447 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6448 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6449 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6450 coal_cap->int_lat_tmr_min_max =
6451 le16_to_cpu(resp->int_lat_tmr_min_max);
6452 coal_cap->int_lat_tmr_max_max =
6453 le16_to_cpu(resp->int_lat_tmr_max_max);
6454 coal_cap->num_cmpl_aggr_int_max =
6455 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6456 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6457 }
bbf33d1d 6458 hwrm_req_drop(bp, req);
74706afa
MC
6459}
6460
6461static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6462{
6463 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6464
6465 return usec * 1000 / coal_cap->timer_units;
6466}
6467
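bnxt_usec_to_coal_tmr() converts microseconds into device timer ticks using the timer_units value reported by RING_AGGINT_QCAPS (80 by default, which suggests an 80 ns tick). A worked example with the default units (illustrative helper only, not part of the driver):

static u16 bnxt_example_coal_tmr(u16 usec)
{
	u16 timer_units = 80;	/* default set in bnxt_hwrm_coal_params_qcaps() */

	return usec * 1000 / timer_units;	/* 25 usec -> 312 ticks */
}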
6468static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6469 struct bnxt_coal *hw_coal,
bb053f52
MC
6470 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6471{
74706afa
MC
6472 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6473 u32 cmpl_params = coal_cap->cmpl_params;
6474 u16 val, tmr, max, flags = 0;
f8503969
MC
6475
6476 max = hw_coal->bufs_per_record * 128;
6477 if (hw_coal->budget)
6478 max = hw_coal->bufs_per_record * hw_coal->budget;
74706afa 6479 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
f8503969
MC
6480
6481 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6482 req->num_cmpl_aggr_int = cpu_to_le16(val);
b153cbc5 6483
74706afa 6484 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
f8503969
MC
6485 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6486
74706afa
MC
6487 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6488 coal_cap->num_cmpl_dma_aggr_during_int_max);
f8503969
MC
6489 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6490
74706afa
MC
6491 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6492 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
f8503969
MC
6493 req->int_lat_tmr_max = cpu_to_le16(tmr);
6494
6495 /* min timer set to 1/2 of interrupt timer */
74706afa
MC
6496 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6497 val = tmr / 2;
6498 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6499 req->int_lat_tmr_min = cpu_to_le16(val);
6500 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6501 }
f8503969
MC
6502
6503 /* buf timer set to 1/4 of interrupt timer */
74706afa 6504 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
f8503969
MC
6505 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6506
74706afa
MC
6507 if (cmpl_params &
6508 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6509 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6510 val = clamp_t(u16, tmr, 1,
6511 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6adc4601 6512 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
74706afa
MC
6513 req->enables |=
6514 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6515 }
f8503969 6516
74706afa
MC
6517 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6518 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6519 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6520 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
f8503969 6521 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
bb053f52 6522 req->flags = cpu_to_le16(flags);
74706afa 6523 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
bb053f52
MC
6524}
6525
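The helper above derives the secondary timers from the main interrupt-latency timer: the minimum timer is half of it and the DMA buffer timer a quarter, each clamped to the queried capability. A worked sketch with a 100 usec coalescing tick and the assumed default 80 ns timer units (illustrative helper only, clamping omitted):

static u16 bnxt_example_coal_timers(u16 coal_ticks_usec, u16 *min_tmr,
				    u16 *buf_tmr)
{
	u16 tmr = coal_ticks_usec * 1000 / 80;	/* 100 usec -> 1250 ticks */

	*min_tmr = tmr / 2;			/* -> 625 */
	*buf_tmr = tmr / 4;			/* -> 312 */
	return tmr;
}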
58590c8d
MC
6526static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6527 struct bnxt_coal *hw_coal)
6528{
bbf33d1d 6529 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
58590c8d
MC
6530 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6531 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6532 u32 nq_params = coal_cap->nq_params;
6533 u16 tmr;
bbf33d1d 6534 int rc;
58590c8d
MC
6535
6536 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6537 return 0;
6538
bbf33d1d
EP
6539 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6540 if (rc)
6541 return rc;
6542
6543 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6544 req->flags =
58590c8d
MC
6545 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6546
6547 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6548 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
bbf33d1d
EP
6549 req->int_lat_tmr_min = cpu_to_le16(tmr);
6550 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6551 return hwrm_req_send(bp, req);
58590c8d
MC
6552}
6553
6a8788f2
AG
6554int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6555{
bbf33d1d 6556 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6a8788f2
AG
6557 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6558 struct bnxt_coal coal;
bbf33d1d 6559 int rc;
6a8788f2
AG
6560
6561 /* Tick values in microseconds.
6562 * 1 coal_buf x bufs_per_record = 1 completion record.
6563 */
6564 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6565
6566 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6567 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6568
6569 if (!bnapi->rx_ring)
6570 return -ENODEV;
6571
bbf33d1d
EP
6572 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6573 if (rc)
6574 return rc;
6a8788f2 6575
bbf33d1d 6576 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6a8788f2 6577
bbf33d1d 6578 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6a8788f2 6579
bbf33d1d 6580 return hwrm_req_send(bp, req_rx);
6a8788f2
AG
6581}
6582
c0c050c5
MC
6583int bnxt_hwrm_set_coal(struct bnxt *bp)
6584{
bbf33d1d
EP
6585 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6586 *req;
6587 int i, rc;
6588
6589 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6590 if (rc)
6591 return rc;
c0c050c5 6592
bbf33d1d
EP
6593 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6594 if (rc) {
6595 hwrm_req_drop(bp, req_rx);
6596 return rc;
6597 }
c0c050c5 6598
bbf33d1d
EP
6599 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6600 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
c0c050c5 6601
bbf33d1d
EP
6602 hwrm_req_hold(bp, req_rx);
6603 hwrm_req_hold(bp, req_tx);
c0c050c5 6604 for (i = 0; i < bp->cp_nr_rings; i++) {
dfc9c94a 6605 struct bnxt_napi *bnapi = bp->bnapi[i];
58590c8d 6606 struct bnxt_coal *hw_coal;
2c61d211 6607 u16 ring_id;
c0c050c5 6608
bbf33d1d 6609 req = req_rx;
2c61d211
MC
6610 if (!bnapi->rx_ring) {
6611 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
bbf33d1d 6612 req = req_tx;
2c61d211
MC
6613 } else {
6614 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6615 }
6616 req->ring_id = cpu_to_le16(ring_id);
dfc9c94a 6617
bbf33d1d 6618 rc = hwrm_req_send(bp, req);
c0c050c5
MC
6619 if (rc)
6620 break;
58590c8d
MC
6621
6622 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6623 continue;
6624
6625 if (bnapi->rx_ring && bnapi->tx_ring) {
bbf33d1d 6626 req = req_tx;
58590c8d
MC
6627 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6628 req->ring_id = cpu_to_le16(ring_id);
bbf33d1d 6629 rc = hwrm_req_send(bp, req);
58590c8d
MC
6630 if (rc)
6631 break;
6632 }
6633 if (bnapi->rx_ring)
6634 hw_coal = &bp->rx_coal;
6635 else
6636 hw_coal = &bp->tx_coal;
6637 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
c0c050c5 6638 }
bbf33d1d
EP
6639 hwrm_req_drop(bp, req_rx);
6640 hwrm_req_drop(bp, req_tx);
c0c050c5
MC
6641 return rc;
6642}
6643
3d061591 6644static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
c0c050c5 6645{
bbf33d1d
EP
6646 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6647 struct hwrm_stat_ctx_free_input *req;
3d061591 6648 int i;
c0c050c5
MC
6649
6650 if (!bp->bnapi)
3d061591 6651 return;
c0c050c5 6652
3e8060fa 6653 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3d061591 6654 return;
3e8060fa 6655
bbf33d1d
EP
6656 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6657 return;
6658 if (BNXT_FW_MAJ(bp) <= 20) {
6659 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6660 hwrm_req_drop(bp, req);
6661 return;
6662 }
6663 hwrm_req_hold(bp, req0);
6664 }
6665 hwrm_req_hold(bp, req);
c0c050c5
MC
6666 for (i = 0; i < bp->cp_nr_rings; i++) {
6667 struct bnxt_napi *bnapi = bp->bnapi[i];
6668 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6669
6670 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
bbf33d1d
EP
6671 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6672 if (req0) {
6673 req0->stat_ctx_id = req->stat_ctx_id;
6674 hwrm_req_send(bp, req0);
c2dec363 6675 }
bbf33d1d 6676 hwrm_req_send(bp, req);
c0c050c5
MC
6677
6678 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6679 }
6680 }
bbf33d1d
EP
6681 hwrm_req_drop(bp, req);
6682 if (req0)
6683 hwrm_req_drop(bp, req0);
c0c050c5
MC
6684}
6685
6686static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6687{
bbf33d1d
EP
6688 struct hwrm_stat_ctx_alloc_output *resp;
6689 struct hwrm_stat_ctx_alloc_input *req;
6690 int rc, i;
c0c050c5 6691
3e8060fa
PS
6692 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6693 return 0;
6694
bbf33d1d
EP
6695 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6696 if (rc)
6697 return rc;
c0c050c5 6698
bbf33d1d
EP
6699 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6700 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
c0c050c5 6701
bbf33d1d 6702 resp = hwrm_req_hold(bp, req);
c0c050c5
MC
6703 for (i = 0; i < bp->cp_nr_rings; i++) {
6704 struct bnxt_napi *bnapi = bp->bnapi[i];
6705 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6706
bbf33d1d 6707 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
c0c050c5 6708
bbf33d1d 6709 rc = hwrm_req_send(bp, req);
c0c050c5
MC
6710 if (rc)
6711 break;
6712
6713 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6714
6715 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6716 }
bbf33d1d 6717 hwrm_req_drop(bp, req);
89aa8445 6718 return rc;
c0c050c5
MC
6719}
6720
cf6645f8
MC
6721static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6722{
bbf33d1d
EP
6723 struct hwrm_func_qcfg_output *resp;
6724 struct hwrm_func_qcfg_input *req;
8ae24738 6725 u32 min_db_offset = 0;
9315edca 6726 u16 flags;
cf6645f8
MC
6727 int rc;
6728
bbf33d1d
EP
6729 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6730 if (rc)
6731 return rc;
6732
6733 req->fid = cpu_to_le16(0xffff);
6734 resp = hwrm_req_hold(bp, req);
6735 rc = hwrm_req_send(bp, req);
cf6645f8
MC
6736 if (rc)
6737 goto func_qcfg_exit;
6738
6739#ifdef CONFIG_BNXT_SRIOV
6740 if (BNXT_VF(bp)) {
cf6645f8
MC
6741 struct bnxt_vf_info *vf = &bp->vf;
6742
6743 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
230d1f0d
MC
6744 } else {
6745 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
cf6645f8
MC
6746 }
6747#endif
9315edca
MC
6748 flags = le16_to_cpu(resp->flags);
6749 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6750 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
97381a18 6751 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
9315edca 6752 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
97381a18 6753 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
9315edca
MC
6754 }
6755 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6756 bp->flags |= BNXT_FLAG_MULTI_HOST;
8d4bd96b
MC
6757 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6758 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
bc39f885 6759
567b2abe
SB
6760 switch (resp->port_partition_type) {
6761 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6762 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6763 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6764 bp->port_partition_type = resp->port_partition_type;
6765 break;
6766 }
32e8239c
MC
6767 if (bp->hwrm_spec_code < 0x10707 ||
6768 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6769 bp->br_mode = BRIDGE_MODE_VEB;
6770 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6771 bp->br_mode = BRIDGE_MODE_VEPA;
6772 else
6773 bp->br_mode = BRIDGE_MODE_UNDEF;
cf6645f8 6774
7eb9bb3a
MC
6775 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6776 if (!bp->max_mtu)
6777 bp->max_mtu = BNXT_MAX_MTU;
6778
8ae24738
MC
6779 if (bp->db_size)
6780 goto func_qcfg_exit;
6781
6782 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6783 if (BNXT_PF(bp))
6784 min_db_offset = DB_PF_OFFSET_P5;
6785 else
6786 min_db_offset = DB_VF_OFFSET_P5;
6787 }
6788 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6789 1024);
6790 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6791 bp->db_size <= min_db_offset)
6792 bp->db_size = pci_resource_len(bp->pdev, 2);
6793
cf6645f8 6794func_qcfg_exit:
bbf33d1d 6795 hwrm_req_drop(bp, req);
cf6645f8
MC
6796 return rc;
6797}
6798
e9696ff3
MC
6799static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6800 struct hwrm_func_backing_store_qcaps_output *resp)
6801{
6802 struct bnxt_mem_init *mem_init;
41435c39 6803 u16 init_mask;
e9696ff3 6804 u8 init_val;
41435c39 6805 u8 *offset;
e9696ff3
MC
6806 int i;
6807
6808 init_val = resp->ctx_kind_initializer;
41435c39
MC
6809 init_mask = le16_to_cpu(resp->ctx_init_mask);
6810 offset = &resp->qp_init_offset;
6811 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6812 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
e9696ff3 6813 mem_init->init_val = init_val;
41435c39
MC
6814 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6815 if (!init_mask)
6816 continue;
6817 if (i == BNXT_CTX_MEM_INIT_STAT)
6818 offset = &resp->stat_init_offset;
6819 if (init_mask & (1 << i))
6820 mem_init->offset = *offset * 4;
6821 else
6822 mem_init->init_val = 0;
6823 }
6824 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6825 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6826 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6827 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6828 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6829 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
e9696ff3
MC
6830}
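/* Decoding note for bnxt_init_ctx_initializer() above: the firmware reports
 * one fill byte (ctx_kind_initializer) plus a per-type bit in ctx_init_mask
 * and a per-type offset field.  When a type's bit is set, its offset is
 * converted from the response's 4-byte units to a byte offset (*offset * 4);
 * when the bit is clear, init_val is forced to 0 and no initialization is
 * done.  BNXT_MEM_INVALID_OFFSET marks types with no usable offset.  The
 * STAT offset is not adjacent to the preceding offset fields in the
 * response, which is why the offset pointer is re-seated inside the loop.
 */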
6831
98f04cf0
MC
6832static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6833{
bbf33d1d
EP
6834 struct hwrm_func_backing_store_qcaps_output *resp;
6835 struct hwrm_func_backing_store_qcaps_input *req;
98f04cf0
MC
6836 int rc;
6837
6838 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6839 return 0;
6840
bbf33d1d
EP
6841 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
6842 if (rc)
6843 return rc;
6844
6845 resp = hwrm_req_hold(bp, req);
6846 rc = hwrm_req_send_silent(bp, req);
98f04cf0
MC
6847 if (!rc) {
6848 struct bnxt_ctx_pg_info *ctx_pg;
6849 struct bnxt_ctx_mem_info *ctx;
ac3158cb 6850 int i, tqm_rings;
98f04cf0
MC
6851
6852 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6853 if (!ctx) {
6854 rc = -ENOMEM;
6855 goto ctx_err;
6856 }
98f04cf0
MC
6857 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6858 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6859 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6860 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6861 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6862 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6863 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6864 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6865 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6866 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6867 ctx->vnic_max_vnic_entries =
6868 le16_to_cpu(resp->vnic_max_vnic_entries);
6869 ctx->vnic_max_ring_table_entries =
6870 le16_to_cpu(resp->vnic_max_ring_table_entries);
6871 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6872 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6873 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6874 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6875 ctx->tqm_min_entries_per_ring =
6876 le32_to_cpu(resp->tqm_min_entries_per_ring);
6877 ctx->tqm_max_entries_per_ring =
6878 le32_to_cpu(resp->tqm_max_entries_per_ring);
6879 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6880 if (!ctx->tqm_entries_multiple)
6881 ctx->tqm_entries_multiple = 1;
6882 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6883 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
53579e37
DS
6884 ctx->mrav_num_entries_units =
6885 le16_to_cpu(resp->mrav_num_entries_units);
98f04cf0
MC
6886 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6887 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
e9696ff3
MC
6888
6889 bnxt_init_ctx_initializer(ctx, resp);
6890
ac3158cb
MC
6891 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6892 if (!ctx->tqm_fp_rings_count)
6893 ctx->tqm_fp_rings_count = bp->max_q;
a029a2fe
MC
6894 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6895 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
ac3158cb 6896
a029a2fe 6897 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ac3158cb
MC
6898 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6899 if (!ctx_pg) {
6900 kfree(ctx);
6901 rc = -ENOMEM;
6902 goto ctx_err;
6903 }
6904 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6905 ctx->tqm_mem[i] = ctx_pg;
6906 bp->ctx = ctx;
98f04cf0
MC
6907 } else {
6908 rc = 0;
6909 }
6910ctx_err:
bbf33d1d 6911 hwrm_req_drop(bp, req);
98f04cf0
MC
6912 return rc;
6913}
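/* Summary of bnxt_hwrm_func_backing_store_qcaps() above: it runs at most
 * once (skipped for VFs, pre-1.9.2 firmware, or when bp->ctx already
 * exists) and uses the silent send variant because older firmware may not
 * implement the command; in that case rc is reset to 0 and the driver
 * simply runs without host backing store.  On success it records the
 * per-type entry sizes and limits in bp->ctx and allocates one
 * bnxt_ctx_pg_info per TQM ring: tqm_fp_rings_count fast-path rings
 * (capped at BNXT_MAX_TQM_FP_RINGS, defaulting to bp->max_q when the
 * firmware reports zero) plus BNXT_MAX_TQM_SP_RINGS slow-path rings.
 */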
6914
1b9394e5
MC
6915static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6916 __le64 *pg_dir)
6917{
be6d755f
EP
6918 if (!rmem->nr_pages)
6919 return;
6920
702279d2 6921 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
08fe9d18
MC
6922 if (rmem->depth >= 1) {
6923 if (rmem->depth == 2)
6924 *pg_attr |= 2;
6925 else
6926 *pg_attr |= 1;
1b9394e5
MC
6927 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6928 } else {
6929 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6930 }
6931}
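/* bnxt_hwrm_set_pg_attr() above encodes one backing-store area for the
 * firmware: BNXT_SET_CTX_PAGE_ATTR() fills in the page-size attribute and
 * the low bits of *pg_attr select the indirection level (0 = the page
 * directory pointer references the single data page directly, 1 or 2 =
 * it references a one- or two-level page table at rmem->pg_tbl_map).
 * Areas with no pages are left untouched.
 */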
6932
6933#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6934 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6935 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6936 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6937 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6938 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6939
6940static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6941{
bbf33d1d 6942 struct hwrm_func_backing_store_cfg_input *req;
1b9394e5
MC
6943 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6944 struct bnxt_ctx_pg_info *ctx_pg;
bbf33d1d
EP
6945 void **__req = (void **)&req;
6946 u32 req_len = sizeof(*req);
1b9394e5
MC
6947 __le32 *num_entries;
6948 __le64 *pg_dir;
53579e37 6949 u32 flags = 0;
1b9394e5 6950 u8 *pg_attr;
1b9394e5 6951 u32 ena;
bbf33d1d 6952 int rc;
9f90445c 6953 int i;
1b9394e5
MC
6954
6955 if (!ctx)
6956 return 0;
6957
16db6323
MC
6958 if (req_len > bp->hwrm_max_ext_req_len)
6959 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
bbf33d1d
EP
6960 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
6961 if (rc)
6962 return rc;
1b9394e5 6963
bbf33d1d 6964 req->enables = cpu_to_le32(enables);
1b9394e5
MC
6965 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6966 ctx_pg = &ctx->qp_mem;
bbf33d1d
EP
6967 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
6968 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6969 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6970 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
1b9394e5 6971 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
6972 &req->qpc_pg_size_qpc_lvl,
6973 &req->qpc_page_dir);
1b9394e5
MC
6974 }
6975 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6976 ctx_pg = &ctx->srq_mem;
bbf33d1d
EP
6977 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
6978 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6979 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
1b9394e5 6980 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
6981 &req->srq_pg_size_srq_lvl,
6982 &req->srq_page_dir);
1b9394e5
MC
6983 }
6984 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6985 ctx_pg = &ctx->cq_mem;
bbf33d1d
EP
6986 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
6987 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6988 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6989 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6990 &req->cq_pg_size_cq_lvl,
6991 &req->cq_page_dir);
1b9394e5
MC
6992 }
6993 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6994 ctx_pg = &ctx->vnic_mem;
bbf33d1d 6995 req->vnic_num_vnic_entries =
1b9394e5 6996 cpu_to_le16(ctx->vnic_max_vnic_entries);
bbf33d1d 6997 req->vnic_num_ring_table_entries =
1b9394e5 6998 cpu_to_le16(ctx->vnic_max_ring_table_entries);
bbf33d1d 6999 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
1b9394e5 7000 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7001 &req->vnic_pg_size_vnic_lvl,
7002 &req->vnic_page_dir);
1b9394e5
MC
7003 }
7004 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7005 ctx_pg = &ctx->stat_mem;
bbf33d1d
EP
7006 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7007 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
1b9394e5 7008 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7009 &req->stat_pg_size_stat_lvl,
7010 &req->stat_page_dir);
1b9394e5 7011 }
cf6daed0
MC
7012 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7013 ctx_pg = &ctx->mrav_mem;
bbf33d1d 7014 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
53579e37
DS
7015 if (ctx->mrav_num_entries_units)
7016 flags |=
7017 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
bbf33d1d 7018 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
cf6daed0 7019 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7020 &req->mrav_pg_size_mrav_lvl,
7021 &req->mrav_page_dir);
cf6daed0
MC
7022 }
7023 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7024 ctx_pg = &ctx->tim_mem;
bbf33d1d
EP
7025 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7026 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
cf6daed0 7027 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
bbf33d1d
EP
7028 &req->tim_pg_size_tim_lvl,
7029 &req->tim_page_dir);
cf6daed0 7030 }
bbf33d1d
EP
7031 for (i = 0, num_entries = &req->tqm_sp_num_entries,
7032 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7033 pg_dir = &req->tqm_sp_page_dir,
1b9394e5 7034 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
a029a2fe
MC
7035 i < BNXT_MAX_TQM_RINGS;
7036 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
1b9394e5
MC
7037 if (!(enables & ena))
7038 continue;
7039
bbf33d1d 7040 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
1b9394e5
MC
7041 ctx_pg = ctx->tqm_mem[i];
7042 *num_entries = cpu_to_le32(ctx_pg->entries);
7043 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7044 }
bbf33d1d
EP
7045 req->flags = cpu_to_le32(flags);
7046 return hwrm_req_send(bp, req);
1b9394e5
MC
7047}
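/* Note on bnxt_hwrm_func_backing_store_cfg() above: the full request may be
 * longer than older firmware can accept, so it is truncated to the legacy
 * length (BNXT_BACKING_STORE_CFG_LEGACY_LEN) when it exceeds
 * bp->hwrm_max_ext_req_len.  The enables bitmask selects which sections are
 * written.  The TQM loop walks three parallel runs of request fields
 * (num_entries, pg_attr, page_dir), starting with the slow-path ring, and
 * shifts the enable bit left once per ring.
 */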
7048
98f04cf0 7049static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
08fe9d18 7050 struct bnxt_ctx_pg_info *ctx_pg)
98f04cf0
MC
7051{
7052 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7053
98f04cf0
MC
7054 rmem->page_size = BNXT_PAGE_SIZE;
7055 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7056 rmem->dma_arr = ctx_pg->ctx_dma_arr;
1b9394e5 7057 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
08fe9d18
MC
7058 if (rmem->depth >= 1)
7059 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
98f04cf0
MC
7060 return bnxt_alloc_ring(bp, rmem);
7061}
7062
08fe9d18
MC
7063static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7064 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
e9696ff3 7065 u8 depth, struct bnxt_mem_init *mem_init)
08fe9d18
MC
7066{
7067 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7068 int rc;
7069
7070 if (!mem_size)
bbf211b1 7071 return -EINVAL;
08fe9d18
MC
7072
7073 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7074 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7075 ctx_pg->nr_pages = 0;
7076 return -EINVAL;
7077 }
7078 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7079 int nr_tbls, i;
7080
7081 rmem->depth = 2;
7082 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7083 GFP_KERNEL);
7084 if (!ctx_pg->ctx_pg_tbl)
7085 return -ENOMEM;
7086 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7087 rmem->nr_pages = nr_tbls;
7088 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7089 if (rc)
7090 return rc;
7091 for (i = 0; i < nr_tbls; i++) {
7092 struct bnxt_ctx_pg_info *pg_tbl;
7093
7094 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7095 if (!pg_tbl)
7096 return -ENOMEM;
7097 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7098 rmem = &pg_tbl->ring_mem;
7099 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7100 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7101 rmem->depth = 1;
7102 rmem->nr_pages = MAX_CTX_PAGES;
e9696ff3 7103 rmem->mem_init = mem_init;
6ef982de
MC
7104 if (i == (nr_tbls - 1)) {
7105 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7106
7107 if (rem)
7108 rmem->nr_pages = rem;
7109 }
08fe9d18
MC
7110 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7111 if (rc)
7112 break;
7113 }
7114 } else {
7115 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7116 if (rmem->nr_pages > 1 || depth)
7117 rmem->depth = 1;
e9696ff3 7118 rmem->mem_init = mem_init;
08fe9d18
MC
7119 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7120 }
7121 return rc;
7122}
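/* Allocation strategy in bnxt_alloc_ctx_pg_tbls() above: a zero mem_size or
 * anything needing more than MAX_CTX_TOTAL_PAGES pages is rejected with
 * -EINVAL.  Areas that fit in MAX_CTX_PAGES pages (and were requested with
 * depth <= 1) use a single-level ring, with depth 1 only when more than one
 * page is needed.  Larger areas switch to two-level paging: a top-level
 * block of page-table pages is allocated first, then one bnxt_ctx_pg_info
 * per table, each covering MAX_CTX_PAGES data pages except the last, which
 * is trimmed to the remainder.
 */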
7123
7124static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7125 struct bnxt_ctx_pg_info *ctx_pg)
7126{
7127 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7128
7129 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7130 ctx_pg->ctx_pg_tbl) {
7131 int i, nr_tbls = rmem->nr_pages;
7132
7133 for (i = 0; i < nr_tbls; i++) {
7134 struct bnxt_ctx_pg_info *pg_tbl;
7135 struct bnxt_ring_mem_info *rmem2;
7136
7137 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7138 if (!pg_tbl)
7139 continue;
7140 rmem2 = &pg_tbl->ring_mem;
7141 bnxt_free_ring(bp, rmem2);
7142 ctx_pg->ctx_pg_arr[i] = NULL;
7143 kfree(pg_tbl);
7144 ctx_pg->ctx_pg_tbl[i] = NULL;
7145 }
7146 kfree(ctx_pg->ctx_pg_tbl);
7147 ctx_pg->ctx_pg_tbl = NULL;
7148 }
7149 bnxt_free_ring(bp, rmem);
7150 ctx_pg->nr_pages = 0;
7151}
7152
98f04cf0
MC
7153static void bnxt_free_ctx_mem(struct bnxt *bp)
7154{
7155 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7156 int i;
7157
7158 if (!ctx)
7159 return;
7160
7161 if (ctx->tqm_mem[0]) {
ac3158cb 7162 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
08fe9d18 7163 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
98f04cf0
MC
7164 kfree(ctx->tqm_mem[0]);
7165 ctx->tqm_mem[0] = NULL;
7166 }
7167
cf6daed0
MC
7168 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7169 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
08fe9d18
MC
7170 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7171 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7172 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7173 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7174 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
98f04cf0
MC
7175 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7176}
7177
7178static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7179{
7180 struct bnxt_ctx_pg_info *ctx_pg;
7181 struct bnxt_ctx_mem_info *ctx;
e9696ff3 7182 struct bnxt_mem_init *init;
1b9394e5 7183 u32 mem_size, ena, entries;
c7dd7ab4 7184 u32 entries_sp, min;
53579e37 7185 u32 num_mr, num_ah;
cf6daed0
MC
7186 u32 extra_srqs = 0;
7187 u32 extra_qps = 0;
7188 u8 pg_lvl = 1;
98f04cf0
MC
7189 int i, rc;
7190
7191 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7192 if (rc) {
7193 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7194 rc);
7195 return rc;
7196 }
7197 ctx = bp->ctx;
7198 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7199 return 0;
7200
d629522e 7201 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
cf6daed0
MC
7202 pg_lvl = 2;
7203 extra_qps = 65536;
7204 extra_srqs = 8192;
7205 }
7206
98f04cf0 7207 ctx_pg = &ctx->qp_mem;
cf6daed0
MC
7208 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7209 extra_qps;
be6d755f
EP
7210 if (ctx->qp_entry_size) {
7211 mem_size = ctx->qp_entry_size * ctx_pg->entries;
e9696ff3
MC
7212 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7213 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7214 if (rc)
7215 return rc;
7216 }
98f04cf0
MC
7217
7218 ctx_pg = &ctx->srq_mem;
cf6daed0 7219 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
be6d755f
EP
7220 if (ctx->srq_entry_size) {
7221 mem_size = ctx->srq_entry_size * ctx_pg->entries;
e9696ff3
MC
7222 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7223 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7224 if (rc)
7225 return rc;
7226 }
98f04cf0
MC
7227
7228 ctx_pg = &ctx->cq_mem;
cf6daed0 7229 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
be6d755f
EP
7230 if (ctx->cq_entry_size) {
7231 mem_size = ctx->cq_entry_size * ctx_pg->entries;
e9696ff3
MC
7232 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7233 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
be6d755f
EP
7234 if (rc)
7235 return rc;
7236 }
98f04cf0
MC
7237
7238 ctx_pg = &ctx->vnic_mem;
7239 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7240 ctx->vnic_max_ring_table_entries;
be6d755f
EP
7241 if (ctx->vnic_entry_size) {
7242 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
e9696ff3
MC
7243 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7244 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
be6d755f
EP
7245 if (rc)
7246 return rc;
7247 }
98f04cf0
MC
7248
7249 ctx_pg = &ctx->stat_mem;
7250 ctx_pg->entries = ctx->stat_max_entries;
be6d755f
EP
7251 if (ctx->stat_entry_size) {
7252 mem_size = ctx->stat_entry_size * ctx_pg->entries;
e9696ff3
MC
7253 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7254 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
be6d755f
EP
7255 if (rc)
7256 return rc;
7257 }
98f04cf0 7258
cf6daed0
MC
7259 ena = 0;
7260 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7261 goto skip_rdma;
7262
7263 ctx_pg = &ctx->mrav_mem;
53579e37
DS
7264 /* 128K extra is needed to accommodate static AH context
7265 * allocation by f/w.
7266 */
7267 num_mr = 1024 * 256;
7268 num_ah = 1024 * 128;
7269 ctx_pg->entries = num_mr + num_ah;
be6d755f
EP
7270 if (ctx->mrav_entry_size) {
7271 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
e9696ff3
MC
7272 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7273 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
be6d755f
EP
7274 if (rc)
7275 return rc;
7276 }
cf6daed0 7277 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
53579e37
DS
7278 if (ctx->mrav_num_entries_units)
7279 ctx_pg->entries =
7280 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7281 (num_ah / ctx->mrav_num_entries_units);
cf6daed0
MC
7282
7283 ctx_pg = &ctx->tim_mem;
7284 ctx_pg->entries = ctx->qp_mem.entries;
be6d755f
EP
7285 if (ctx->tim_entry_size) {
7286 mem_size = ctx->tim_entry_size * ctx_pg->entries;
e9696ff3 7287 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
be6d755f
EP
7288 if (rc)
7289 return rc;
7290 }
cf6daed0
MC
7291 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7292
7293skip_rdma:
c7dd7ab4
MC
7294 min = ctx->tqm_min_entries_per_ring;
7295 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7296 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7297 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
c12e1643 7298 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
98f04cf0 7299 entries = roundup(entries, ctx->tqm_entries_multiple);
c7dd7ab4 7300 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
ac3158cb 7301 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
98f04cf0 7302 ctx_pg = ctx->tqm_mem[i];
c7dd7ab4 7303 ctx_pg->entries = i ? entries : entries_sp;
be6d755f
EP
7304 if (ctx->tqm_entry_size) {
7305 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7306 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
e9696ff3 7307 NULL);
be6d755f
EP
7308 if (rc)
7309 return rc;
7310 }
1b9394e5 7311 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
98f04cf0 7312 }
1b9394e5
MC
7313 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7314 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
0b5b561c 7315 if (rc) {
1b9394e5
MC
7316 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7317 rc);
0b5b561c
MC
7318 return rc;
7319 }
7320 ctx->flags |= BNXT_CTX_FLAG_INITED;
98f04cf0
MC
7321 return 0;
7322}
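/* Sizing policy in bnxt_alloc_ctx_mem() above: when the RoCE capability is
 * present (and this is not a kdump kernel) the QP/SRQ/CQ areas get headroom
 * for 65536 extra QPs and 8192 extra SRQs and use 2-level page tables.  The
 * MRAV area is sized for 256K MRs plus the 128K AHs mentioned in the
 * comment above, re-encoded in "entries units" when the firmware reports
 * mrav_num_entries_units.  TQM ring 0 (slow path) gets entries_sp, the
 * fast-path rings get entries, both rounded up to tqm_entries_multiple and
 * the fast-path value clamped to the firmware's min/max per ring.
 */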
7323
db4723b3 7324int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
be0dd9c4 7325{
bbf33d1d
EP
7326 struct hwrm_func_resource_qcaps_output *resp;
7327 struct hwrm_func_resource_qcaps_input *req;
be0dd9c4
MC
7328 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7329 int rc;
7330
bbf33d1d
EP
7331 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7332 if (rc)
7333 return rc;
be0dd9c4 7334
bbf33d1d
EP
7335 req->fid = cpu_to_le16(0xffff);
7336 resp = hwrm_req_hold(bp, req);
7337 rc = hwrm_req_send_silent(bp, req);
d4f1420d 7338 if (rc)
be0dd9c4 7339 goto hwrm_func_resc_qcaps_exit;
be0dd9c4 7340
db4723b3
MC
7341 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7342 if (!all)
7343 goto hwrm_func_resc_qcaps_exit;
7344
be0dd9c4
MC
7345 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7346 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7347 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7348 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7349 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7350 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7351 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7352 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7353 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7354 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7355 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7356 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7357 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7358 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7359 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7360 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7361
9c1fabdf
MC
7362 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7363 u16 max_msix = le16_to_cpu(resp->max_msix);
7364
f7588cd8 7365 hw_resc->max_nqs = max_msix;
9c1fabdf
MC
7366 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7367 }
7368
4673d664
MC
7369 if (BNXT_PF(bp)) {
7370 struct bnxt_pf_info *pf = &bp->pf;
7371
7372 pf->vf_resv_strategy =
7373 le16_to_cpu(resp->vf_reservation_strategy);
bf82736d 7374 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
4673d664
MC
7375 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7376 }
be0dd9c4 7377hwrm_func_resc_qcaps_exit:
bbf33d1d 7378 hwrm_req_drop(bp, req);
be0dd9c4
MC
7379 return rc;
7380}
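/* bnxt_hwrm_func_resc_qcaps() above always refreshes max_tx_sch_inputs;
 * only when called with all == true does it also record the min/max ranges
 * for completion/TX/RX rings, ring groups, L2 contexts, VNICs and stat
 * contexts.  On P5 chips the NQ limit is taken from max_msix and the
 * hardware ring group limit simply mirrors the RX ring limit.  For the PF,
 * an out-of-range vf_reservation_strategy from the firmware falls back to
 * BNXT_VF_RESV_STRATEGY_MAXIMAL.
 */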
7381
ae5c42f0
MC
7382static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7383{
bbf33d1d
EP
7384 struct hwrm_port_mac_ptp_qcfg_output *resp;
7385 struct hwrm_port_mac_ptp_qcfg_input *req;
ae5c42f0
MC
7386 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7387 u8 flags;
7388 int rc;
7389
7390 if (bp->hwrm_spec_code < 0x10801) {
7391 rc = -ENODEV;
7392 goto no_ptp;
7393 }
7394
bbf33d1d 7395 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
ae5c42f0
MC
7396 if (rc)
7397 goto no_ptp;
7398
bbf33d1d
EP
7399 req->port_id = cpu_to_le16(bp->pf.port_id);
7400 resp = hwrm_req_hold(bp, req);
7401 rc = hwrm_req_send(bp, req);
7402 if (rc)
7403 goto exit;
7404
ae5c42f0
MC
7405 flags = resp->flags;
7406 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7407 rc = -ENODEV;
bbf33d1d 7408 goto exit;
ae5c42f0
MC
7409 }
7410 if (!ptp) {
7411 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
bbf33d1d
EP
7412 if (!ptp) {
7413 rc = -ENOMEM;
7414 goto exit;
7415 }
ae5c42f0
MC
7416 ptp->bp = bp;
7417 bp->ptp_cfg = ptp;
7418 }
7419 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7420 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7421 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7422 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7423 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7424 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7425 } else {
7426 rc = -ENODEV;
bbf33d1d 7427 goto exit;
ae5c42f0 7428 }
a521c8a0 7429 rc = bnxt_ptp_init(bp);
bbf33d1d
EP
7430 if (rc)
7431 netdev_warn(bp->dev, "PTP initialization failed.\n");
7432exit:
7433 hwrm_req_drop(bp, req);
a521c8a0
MC
7434 if (!rc)
7435 return 0;
7436
ae5c42f0 7437no_ptp:
a521c8a0 7438 bnxt_ptp_clear(bp);
ae5c42f0
MC
7439 kfree(ptp);
7440 bp->ptp_cfg = NULL;
7441 return rc;
7442}
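/* PTP discovery in __bnxt_hwrm_ptp_qcfg() above: the command needs HWRM
 * spec 1.8.1+ and the response must advertise HWRM_ACCESS.  bp->ptp_cfg is
 * allocated lazily on first success.  The reference clock registers come
 * from the response when PARTIAL_DIRECT_ACCESS_REF_CLOCK is set, fall back
 * to the fixed TS0 registers on P5 chips, and otherwise PTP is treated as
 * unsupported.  Any failure, including bnxt_ptp_init(), tears the PTP state
 * back down so the rest of the driver sees bp->ptp_cfg == NULL.
 */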
7443
be0dd9c4 7444static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5 7445{
bbf33d1d
EP
7446 struct hwrm_func_qcaps_output *resp;
7447 struct hwrm_func_qcaps_input *req;
6a4f2947 7448 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
1da63ddd 7449 u32 flags, flags_ext;
bbf33d1d 7450 int rc;
c0c050c5 7451
bbf33d1d
EP
7452 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7453 if (rc)
7454 return rc;
c0c050c5 7455
bbf33d1d
EP
7456 req->fid = cpu_to_le16(0xffff);
7457 resp = hwrm_req_hold(bp, req);
7458 rc = hwrm_req_send(bp, req);
c0c050c5
MC
7459 if (rc)
7460 goto hwrm_func_qcaps_exit;
7461
6a4f2947
MC
7462 flags = le32_to_cpu(resp->flags);
7463 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
e4060d30 7464 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6a4f2947 7465 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
e4060d30 7466 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
55e4398d
VV
7467 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7468 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
0a3f4e4f
VV
7469 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7470 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
6154532f
VV
7471 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7472 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
07f83d72
MC
7473 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7474 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
4037eb71
VV
7475 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7476 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
1da63ddd
EP
7477 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7478 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7479
7480 flags_ext = le32_to_cpu(resp->flags_ext);
7481 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7482 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
caf3eedb
PC
7483 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7484 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
e4060d30 7485
7cc5a20e 7486 bp->tx_push_thresh = 0;
fed7edd1
MC
7487 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7488 BNXT_FW_MAJ(bp) > 217)
7cc5a20e
MC
7489 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7490
6a4f2947
MC
7491 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7492 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7493 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7494 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7495 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7496 if (!hw_resc->max_hw_ring_grps)
7497 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7498 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7499 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7500 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7501
c0c050c5
MC
7502 if (BNXT_PF(bp)) {
7503 struct bnxt_pf_info *pf = &bp->pf;
7504
7505 pf->fw_fid = le16_to_cpu(resp->fid);
7506 pf->port_id = le16_to_cpu(resp->port_id);
11f15ed3 7507 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
c0c050c5
MC
7508 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7509 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7510 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7511 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7512 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7513 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7514 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7515 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
ba642ab7 7516 bp->flags &= ~BNXT_FLAG_WOL_CAP;
6a4f2947 7517 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
c1ef146a 7518 bp->flags |= BNXT_FLAG_WOL_CAP;
de5bf194 7519 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
ae5c42f0 7520 __bnxt_hwrm_ptp_qcfg(bp);
de5bf194 7521 } else {
a521c8a0 7522 bnxt_ptp_clear(bp);
de5bf194
MC
7523 kfree(bp->ptp_cfg);
7524 bp->ptp_cfg = NULL;
7525 }
c0c050c5 7526 } else {
379a80a1 7527#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
7528 struct bnxt_vf_info *vf = &bp->vf;
7529
7530 vf->fw_fid = le16_to_cpu(resp->fid);
7cc5a20e 7531 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
379a80a1 7532#endif
c0c050c5
MC
7533 }
7534
c0c050c5 7535hwrm_func_qcaps_exit:
bbf33d1d 7536 hwrm_req_drop(bp, req);
c0c050c5
MC
7537 return rc;
7538}
7539
804fba4e
MC
7540static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7541
be0dd9c4
MC
7542static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7543{
7544 int rc;
7545
7546 rc = __bnxt_hwrm_func_qcaps(bp);
7547 if (rc)
7548 return rc;
804fba4e
MC
7549 rc = bnxt_hwrm_queue_qportcfg(bp);
7550 if (rc) {
7551 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7552 return rc;
7553 }
be0dd9c4 7554 if (bp->hwrm_spec_code >= 0x10803) {
98f04cf0
MC
7555 rc = bnxt_alloc_ctx_mem(bp);
7556 if (rc)
7557 return rc;
db4723b3 7558 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
be0dd9c4 7559 if (!rc)
97381a18 7560 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
be0dd9c4
MC
7561 }
7562 return 0;
7563}
7564
e969ae5b
MC
7565static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7566{
e969ae5b 7567 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
bbf33d1d 7568 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
e969ae5b 7569 u32 flags;
bbf33d1d 7570 int rc;
e969ae5b
MC
7571
7572 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7573 return 0;
7574
bbf33d1d
EP
7575 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7576 if (rc)
7577 return rc;
e969ae5b 7578
bbf33d1d
EP
7579 resp = hwrm_req_hold(bp, req);
7580 rc = hwrm_req_send(bp, req);
e969ae5b
MC
7581 if (rc)
7582 goto hwrm_cfa_adv_qcaps_exit;
7583
7584 flags = le32_to_cpu(resp->flags);
7585 if (flags &
41136ab3
MC
7586 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7587 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
e969ae5b
MC
7588
7589hwrm_cfa_adv_qcaps_exit:
bbf33d1d 7590 hwrm_req_drop(bp, req);
e969ae5b
MC
7591 return rc;
7592}
7593
3e9ec2bb
EP
7594static int __bnxt_alloc_fw_health(struct bnxt *bp)
7595{
7596 if (bp->fw_health)
7597 return 0;
7598
7599 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7600 if (!bp->fw_health)
7601 return -ENOMEM;
7602
7603 return 0;
7604}
7605
7606static int bnxt_alloc_fw_health(struct bnxt *bp)
7607{
7608 int rc;
7609
7610 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7611 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7612 return 0;
7613
7614 rc = __bnxt_alloc_fw_health(bp);
7615 if (rc) {
7616 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7617 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7618 return rc;
7619 }
7620
7621 return 0;
7622}
7623
ba02629f
EP
7624static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7625{
7626 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7627 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7628 BNXT_FW_HEALTH_WIN_MAP_OFF);
7629}
7630
80a9641f
PC
7631bool bnxt_is_fw_healthy(struct bnxt *bp)
7632{
7633 if (bp->fw_health && bp->fw_health->status_reliable) {
7634 u32 fw_status;
7635
7636 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7637 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7638 return false;
7639 }
7640
7641 return true;
7642}
7643
43a440c4
MC
7644static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7645{
7646 struct bnxt_fw_health *fw_health = bp->fw_health;
7647 u32 reg_type;
7648
7649 if (!fw_health || !fw_health->status_reliable)
7650 return;
7651
7652 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7653 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7654 fw_health->status_reliable = false;
7655}
7656
ba02629f
EP
7657static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7658{
7659 void __iomem *hs;
7660 u32 status_loc;
7661 u32 reg_type;
7662 u32 sig;
7663
43a440c4
MC
7664 if (bp->fw_health)
7665 bp->fw_health->status_reliable = false;
7666
ba02629f
EP
7667 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7668 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7669
7670 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7671 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
d1cbd165
MC
7672 if (!bp->chip_num) {
7673 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7674 bp->chip_num = readl(bp->bar0 +
7675 BNXT_FW_HEALTH_WIN_BASE +
7676 BNXT_GRC_REG_CHIP_NUM);
7677 }
43a440c4 7678 if (!BNXT_CHIP_P5(bp))
d1cbd165 7679 return;
43a440c4 7680
d1cbd165
MC
7681 status_loc = BNXT_GRC_REG_STATUS_P5 |
7682 BNXT_FW_HEALTH_REG_TYPE_BAR0;
7683 } else {
7684 status_loc = readl(hs + offsetof(struct hcomm_status,
7685 fw_status_loc));
ba02629f
EP
7686 }
7687
7688 if (__bnxt_alloc_fw_health(bp)) {
7689 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7690 return;
7691 }
7692
ba02629f
EP
7693 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7694 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7695 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7696 __bnxt_map_fw_health_reg(bp, status_loc);
7697 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7698 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7699 }
7700
7701 bp->fw_health->status_reliable = true;
7702}
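/* Firmware status discovery in bnxt_try_map_fw_health_reg() above: the
 * hcomm_status structure location is mapped through the health window and
 * its signature checked.  If the signature is missing, only P5 chips fall
 * back to the fixed BNXT_GRC_REG_STATUS_P5 location (reading chip_num first
 * if it is not known yet); older chips simply leave status_reliable false.
 * Otherwise the status register location is read from hcomm_status, the
 * fw_health structure is allocated on demand, GRC-type registers are mapped
 * through the window, and status_reliable is set.
 */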
7703
9ffbd677
MC
7704static int bnxt_map_fw_health_regs(struct bnxt *bp)
7705{
7706 struct bnxt_fw_health *fw_health = bp->fw_health;
7707 u32 reg_base = 0xffffffff;
7708 int i;
7709
43a440c4 7710 bp->fw_health->status_reliable = false;
9ffbd677
MC
7711 /* Only pre-map the monitoring GRC registers using window 3 */
7712 for (i = 0; i < 4; i++) {
7713 u32 reg = fw_health->regs[i];
7714
7715 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7716 continue;
7717 if (reg_base == 0xffffffff)
7718 reg_base = reg & BNXT_GRC_BASE_MASK;
7719 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7720 return -ERANGE;
ba02629f 7721 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
9ffbd677 7722 }
43a440c4 7723 bp->fw_health->status_reliable = true;
9ffbd677
MC
7724 if (reg_base == 0xffffffff)
7725 return 0;
7726
ba02629f 7727 __bnxt_map_fw_health_reg(bp, reg_base);
9ffbd677
MC
7728 return 0;
7729}
7730
07f83d72
MC
7731static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7732{
07f83d72 7733 struct bnxt_fw_health *fw_health = bp->fw_health;
bbf33d1d
EP
7734 struct hwrm_error_recovery_qcfg_output *resp;
7735 struct hwrm_error_recovery_qcfg_input *req;
07f83d72
MC
7736 int rc, i;
7737
7738 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7739 return 0;
7740
bbf33d1d
EP
7741 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7742 if (rc)
7743 return rc;
7744
7745 resp = hwrm_req_hold(bp, req);
7746 rc = hwrm_req_send(bp, req);
07f83d72
MC
7747 if (rc)
7748 goto err_recovery_out;
07f83d72
MC
7749 fw_health->flags = le32_to_cpu(resp->flags);
7750 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7751 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7752 rc = -EINVAL;
7753 goto err_recovery_out;
7754 }
7755 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7756 fw_health->master_func_wait_dsecs =
7757 le32_to_cpu(resp->master_func_wait_period);
7758 fw_health->normal_func_wait_dsecs =
7759 le32_to_cpu(resp->normal_func_wait_period);
7760 fw_health->post_reset_wait_dsecs =
7761 le32_to_cpu(resp->master_func_wait_period_after_reset);
7762 fw_health->post_reset_max_wait_dsecs =
7763 le32_to_cpu(resp->max_bailout_time_after_reset);
7764 fw_health->regs[BNXT_FW_HEALTH_REG] =
7765 le32_to_cpu(resp->fw_health_status_reg);
7766 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7767 le32_to_cpu(resp->fw_heartbeat_reg);
7768 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7769 le32_to_cpu(resp->fw_reset_cnt_reg);
7770 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7771 le32_to_cpu(resp->reset_inprogress_reg);
7772 fw_health->fw_reset_inprog_reg_mask =
7773 le32_to_cpu(resp->reset_inprogress_reg_mask);
7774 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7775 if (fw_health->fw_reset_seq_cnt >= 16) {
7776 rc = -EINVAL;
7777 goto err_recovery_out;
7778 }
7779 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7780 fw_health->fw_reset_seq_regs[i] =
7781 le32_to_cpu(resp->reset_reg[i]);
7782 fw_health->fw_reset_seq_vals[i] =
7783 le32_to_cpu(resp->reset_reg_val[i]);
7784 fw_health->fw_reset_seq_delay_msec[i] =
7785 resp->delay_after_reset[i];
7786 }
7787err_recovery_out:
bbf33d1d 7788 hwrm_req_drop(bp, req);
9ffbd677
MC
7789 if (!rc)
7790 rc = bnxt_map_fw_health_regs(bp);
07f83d72
MC
7791 if (rc)
7792 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7793 return rc;
7794}
7795
c0c050c5
MC
7796static int bnxt_hwrm_func_reset(struct bnxt *bp)
7797{
bbf33d1d
EP
7798 struct hwrm_func_reset_input *req;
7799 int rc;
c0c050c5 7800
bbf33d1d
EP
7801 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
7802 if (rc)
7803 return rc;
c0c050c5 7804
bbf33d1d
EP
7805 req->enables = 0;
7806 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
7807 return hwrm_req_send(bp, req);
c0c050c5
MC
7808}
7809
4933f675
VV
7810static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7811{
7812 struct hwrm_nvm_get_dev_info_output nvm_info;
7813
7814 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7815 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7816 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7817 nvm_info.nvm_cfg_ver_upd);
7818}
7819
c0c050c5
MC
7820static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7821{
bbf33d1d
EP
7822 struct hwrm_queue_qportcfg_output *resp;
7823 struct hwrm_queue_qportcfg_input *req;
aabfc016
MC
7824 u8 i, j, *qptr;
7825 bool no_rdma;
bbf33d1d 7826 int rc = 0;
c0c050c5 7827
bbf33d1d
EP
7828 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
7829 if (rc)
7830 return rc;
c0c050c5 7831
bbf33d1d
EP
7832 resp = hwrm_req_hold(bp, req);
7833 rc = hwrm_req_send(bp, req);
c0c050c5
MC
7834 if (rc)
7835 goto qportcfg_exit;
7836
7837 if (!resp->max_configurable_queues) {
7838 rc = -EINVAL;
7839 goto qportcfg_exit;
7840 }
7841 bp->max_tc = resp->max_configurable_queues;
87c374de 7842 bp->max_lltc = resp->max_configurable_lossless_queues;
c0c050c5
MC
7843 if (bp->max_tc > BNXT_MAX_QUEUE)
7844 bp->max_tc = BNXT_MAX_QUEUE;
7845
aabfc016
MC
7846 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7847 qptr = &resp->queue_id0;
7848 for (i = 0, j = 0; i < bp->max_tc; i++) {
98f04cf0
MC
7849 bp->q_info[j].queue_id = *qptr;
7850 bp->q_ids[i] = *qptr++;
aabfc016
MC
7851 bp->q_info[j].queue_profile = *qptr++;
7852 bp->tc_to_qidx[j] = j;
7853 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7854 (no_rdma && BNXT_PF(bp)))
7855 j++;
7856 }
98f04cf0 7857 bp->max_q = bp->max_tc;
aabfc016
MC
7858 bp->max_tc = max_t(u8, j, 1);
7859
441cabbb
MC
7860 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7861 bp->max_tc = 1;
7862
87c374de
MC
7863 if (bp->max_lltc > bp->max_tc)
7864 bp->max_lltc = bp->max_tc;
7865
c0c050c5 7866qportcfg_exit:
bbf33d1d 7867 hwrm_req_drop(bp, req);
c0c050c5
MC
7868 return rc;
7869}
7870
7b370ad7 7871static int bnxt_hwrm_poll(struct bnxt *bp)
c0c050c5 7872{
bbf33d1d 7873 struct hwrm_ver_get_input *req;
ba642ab7 7874 int rc;
c0c050c5 7875
bbf33d1d
EP
7876 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7877 if (rc)
7878 return rc;
ba642ab7 7879
bbf33d1d
EP
7880 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7881 req->hwrm_intf_min = HWRM_VERSION_MINOR;
7882 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7883
7884 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
7885 rc = hwrm_req_send(bp, req);
ba642ab7
MC
7886 return rc;
7887}
7888
7889static int bnxt_hwrm_ver_get(struct bnxt *bp)
7890{
bbf33d1d
EP
7891 struct hwrm_ver_get_output *resp;
7892 struct hwrm_ver_get_input *req;
d0ad2ea2 7893 u16 fw_maj, fw_min, fw_bld, fw_rsv;
b7a444f0 7894 u32 dev_caps_cfg, hwrm_ver;
d0ad2ea2 7895 int rc, len;
ba642ab7 7896
bbf33d1d
EP
7897 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
7898 if (rc)
7899 return rc;
7900
7901 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
ba642ab7 7902 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
bbf33d1d
EP
7903 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
7904 req->hwrm_intf_min = HWRM_VERSION_MINOR;
7905 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
7b370ad7 7906
bbf33d1d
EP
7907 resp = hwrm_req_hold(bp, req);
7908 rc = hwrm_req_send(bp, req);
c0c050c5
MC
7909 if (rc)
7910 goto hwrm_ver_get_exit;
7911
7912 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7913
894aa69a
MC
7914 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7915 resp->hwrm_intf_min_8b << 8 |
7916 resp->hwrm_intf_upd_8b;
7917 if (resp->hwrm_intf_maj_8b < 1) {
c193554e 7918 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
894aa69a
MC
7919 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7920 resp->hwrm_intf_upd_8b);
c193554e 7921 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5 7922 }
b7a444f0
VV
7923
7924 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7925 HWRM_VERSION_UPDATE;
7926
7927 if (bp->hwrm_spec_code > hwrm_ver)
7928 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7929 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7930 HWRM_VERSION_UPDATE);
7931 else
7932 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7933 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7934 resp->hwrm_intf_upd_8b);
7935
d0ad2ea2
MC
7936 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7937 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7938 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7939 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7940 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7941 len = FW_VER_STR_LEN;
7942 } else {
7943 fw_maj = resp->hwrm_fw_maj_8b;
7944 fw_min = resp->hwrm_fw_min_8b;
7945 fw_bld = resp->hwrm_fw_bld_8b;
7946 fw_rsv = resp->hwrm_fw_rsvd_8b;
7947 len = BC_HWRM_STR_LEN;
7948 }
7949 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7950 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7951 fw_rsv);
c0c050c5 7952
691aa620
VV
7953 if (strlen(resp->active_pkg_name)) {
7954 int fw_ver_len = strlen(bp->fw_ver_str);
7955
7956 snprintf(bp->fw_ver_str + fw_ver_len,
7957 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7958 resp->active_pkg_name);
7959 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7960 }
7961
ff4fe81d
MC
7962 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7963 if (!bp->hwrm_cmd_timeout)
7964 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7965
1dfddc41 7966 if (resp->hwrm_intf_maj_8b >= 1) {
e6ef2699 7967 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
1dfddc41
MC
7968 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7969 }
7970 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7971 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
e6ef2699 7972
659c805c 7973 bp->chip_num = le16_to_cpu(resp->chip_num);
5313845f 7974 bp->chip_rev = resp->chip_rev;
3e8060fa
PS
7975 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7976 !resp->chip_metal)
7977 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
659c805c 7978
e605db80
DK
7979 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7980 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7981 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
97381a18 7982 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
e605db80 7983
760b6d33
VD
7984 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7985 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7986
abd43a13
VD
7987 if (dev_caps_cfg &
7988 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7989 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7990
2a516444
MC
7991 if (dev_caps_cfg &
7992 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7993 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7994
e969ae5b
MC
7995 if (dev_caps_cfg &
7996 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7997 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7998
c0c050c5 7999hwrm_ver_get_exit:
bbf33d1d 8000 hwrm_req_drop(bp, req);
c0c050c5
MC
8001 return rc;
8002}
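/* Version bookkeeping in bnxt_hwrm_ver_get() above: hwrm_spec_code packs
 * the interface version as (maj << 16) | (min << 8) | upd, so interface
 * 1.10.2 becomes 0x010a02.  The firmware version string uses the 16-bit
 * hwrm_fw_major/minor/build/patch fields when the spec is newer than
 * 0x10803 and a major version is reported, otherwise the legacy 8-bit
 * fields, and "/pkg <name>" is appended when a package name is present.
 * The same response also supplies the default command timeout, the max
 * (extended) request lengths, chip_num/chip_rev, and the dev_caps_cfg
 * capability bits (short commands, Kong channel, 64-bit flow handles,
 * trusted VF, advanced CFA flow management).
 */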
8003
5ac67d8b
RS
8004int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8005{
bbf33d1d 8006 struct hwrm_fw_set_time_input *req;
7dfaa7bc
AB
8007 struct tm tm;
8008 time64_t now = ktime_get_real_seconds();
bbf33d1d 8009 int rc;
5ac67d8b 8010
ca2c39e2
MC
8011 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8012 bp->hwrm_spec_code < 0x10400)
5ac67d8b
RS
8013 return -EOPNOTSUPP;
8014
7dfaa7bc 8015 time64_to_tm(now, 0, &tm);
bbf33d1d
EP
8016 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8017 if (rc)
8018 return rc;
8019
8020 req->year = cpu_to_le16(1900 + tm.tm_year);
8021 req->month = 1 + tm.tm_mon;
8022 req->day = tm.tm_mday;
8023 req->hour = tm.tm_hour;
8024 req->minute = tm.tm_min;
8025 req->second = tm.tm_sec;
8026 return hwrm_req_send(bp, req);
5ac67d8b
RS
8027}
8028
fea6b333
MC
8029static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8030{
8031 u64 sw_tmp;
8032
fa97f303 8033 hw &= mask;
fea6b333
MC
8034 sw_tmp = (*sw & ~mask) | hw;
8035 if (hw < (*sw & mask))
8036 sw_tmp += mask + 1;
8037 WRITE_ONCE(*sw, sw_tmp);
8038}
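/* Worked example for bnxt_add_one_ctr() above (values are illustrative):
 * with mask = 0xffffffff, an accumulated *sw of 0x1_0000_0005 and a new
 * hardware reading of 0x3, sw_tmp starts as 0x1_0000_0003; because hw (3)
 * is smaller than the old low bits (5), the hardware counter must have
 * wrapped, so mask + 1 is added and the stored result is 0x2_0000_0003.
 */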
8039
8040static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8041 int count, bool ignore_zero)
8042{
8043 int i;
8044
8045 for (i = 0; i < count; i++) {
8046 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8047
8048 if (ignore_zero && !hw)
8049 continue;
8050
8051 if (masks[i] == -1ULL)
8052 sw_stats[i] = hw;
8053 else
8054 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8055 }
8056}
8057
8058static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8059{
8060 if (!stats->hw_stats)
8061 return;
8062
8063 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8064 stats->hw_masks, stats->len / 8, false);
8065}
8066
8067static void bnxt_accumulate_all_stats(struct bnxt *bp)
8068{
8069 struct bnxt_stats_mem *ring0_stats;
8070 bool ignore_zero = false;
8071 int i;
8072
8073 /* Chip bug. Counter intermittently becomes 0. */
8074 if (bp->flags & BNXT_FLAG_CHIP_P5)
8075 ignore_zero = true;
8076
8077 for (i = 0; i < bp->cp_nr_rings; i++) {
8078 struct bnxt_napi *bnapi = bp->bnapi[i];
8079 struct bnxt_cp_ring_info *cpr;
8080 struct bnxt_stats_mem *stats;
8081
8082 cpr = &bnapi->cp_ring;
8083 stats = &cpr->stats;
8084 if (!i)
8085 ring0_stats = stats;
8086 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8087 ring0_stats->hw_masks,
8088 ring0_stats->len / 8, ignore_zero);
8089 }
8090 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8091 struct bnxt_stats_mem *stats = &bp->port_stats;
8092 __le64 *hw_stats = stats->hw_stats;
8093 u64 *sw_stats = stats->sw_stats;
8094 u64 *masks = stats->hw_masks;
8095 int cnt;
8096
8097 cnt = sizeof(struct rx_port_stats) / 8;
8098 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8099
8100 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8101 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8102 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8103 cnt = sizeof(struct tx_port_stats) / 8;
8104 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8105 }
8106 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8107 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8108 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8109 }
8110}
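/* bnxt_accumulate_all_stats() above reuses ring 0's hardware counter masks
 * and length for every ring, since all rings share the same stats layout.
 * Zero readings are skipped on P5 chips because of the counter bug noted
 * above, and the port statistics block is folded in as two halves: the RX
 * counters at the start and the TX counters at
 * BNXT_TX_PORT_STATS_BYTE_OFFSET.
 */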
8111
531d1d26 8112static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
3bdf56c4 8113{
bbf33d1d 8114 struct hwrm_port_qstats_input *req;
3bdf56c4 8115 struct bnxt_pf_info *pf = &bp->pf;
bbf33d1d 8116 int rc;
3bdf56c4
MC
8117
8118 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8119 return 0;
8120
531d1d26
MC
8121 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8122 return -EOPNOTSUPP;
8123
bbf33d1d
EP
8124 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8125 if (rc)
8126 return rc;
8127
8128 req->flags = flags;
8129 req->port_id = cpu_to_le16(pf->port_id);
8130 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
177a6cde 8131 BNXT_TX_PORT_STATS_BYTE_OFFSET);
bbf33d1d
EP
8132 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8133 return hwrm_req_send(bp, req);
3bdf56c4
MC
8134}
8135
531d1d26 8136static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
00db3cba 8137{
bbf33d1d
EP
8138 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8139 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8140 struct hwrm_port_qstats_ext_output *resp_qs;
8141 struct hwrm_port_qstats_ext_input *req_qs;
00db3cba 8142 struct bnxt_pf_info *pf = &bp->pf;
ad361adf 8143 u32 tx_stat_size;
36e53349 8144 int rc;
00db3cba
VV
8145
8146 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8147 return 0;
8148
531d1d26
MC
8149 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8150 return -EOPNOTSUPP;
8151
bbf33d1d
EP
8152 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8153 if (rc)
8154 return rc;
8155
8156 req_qs->flags = flags;
8157 req_qs->port_id = cpu_to_le16(pf->port_id);
8158 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8159 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
177a6cde
MC
8160 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8161 sizeof(struct tx_port_stats_ext) : 0;
bbf33d1d
EP
8162 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8163 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8164 resp_qs = hwrm_req_hold(bp, req_qs);
8165 rc = hwrm_req_send(bp, req_qs);
36e53349 8166 if (!rc) {
bbf33d1d
EP
8167 bp->fw_rx_stats_ext_size =
8168 le16_to_cpu(resp_qs->rx_stat_size) / 8;
ad361adf 8169 bp->fw_tx_stats_ext_size = tx_stat_size ?
bbf33d1d 8170 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
36e53349
MC
8171 } else {
8172 bp->fw_rx_stats_ext_size = 0;
8173 bp->fw_tx_stats_ext_size = 0;
8174 }
bbf33d1d
EP
8175 hwrm_req_drop(bp, req_qs);
8176
531d1d26 8177 if (flags)
bbf33d1d 8178 return rc;
531d1d26 8179
e37fed79
MC
8180 if (bp->fw_tx_stats_ext_size <=
8181 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
e37fed79
MC
8182 bp->pri2cos_valid = 0;
8183 return rc;
8184 }
8185
bbf33d1d
EP
8186 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8187 if (rc)
8188 return rc;
e37fed79 8189
bbf33d1d
EP
8190 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8191
8192 resp_qc = hwrm_req_hold(bp, req_qc);
8193 rc = hwrm_req_send(bp, req_qc);
e37fed79 8194 if (!rc) {
e37fed79
MC
8195 u8 *pri2cos;
8196 int i, j;
8197
bbf33d1d 8198 pri2cos = &resp_qc->pri0_cos_queue_id;
e37fed79
MC
8199 for (i = 0; i < 8; i++) {
8200 u8 queue_id = pri2cos[i];
a24ec322 8201 u8 queue_idx;
e37fed79 8202
a24ec322
MC
8203 /* Per port queue IDs start from 0, 10, 20, etc */
8204 queue_idx = queue_id % 10;
8205 if (queue_idx > BNXT_MAX_QUEUE) {
8206 bp->pri2cos_valid = false;
bbf33d1d
EP
8207 hwrm_req_drop(bp, req_qc);
8208 return rc;
a24ec322 8209 }
e37fed79
MC
8210 for (j = 0; j < bp->max_q; j++) {
8211 if (bp->q_ids[j] == queue_id)
a24ec322 8212 bp->pri2cos_idx[i] = queue_idx;
e37fed79
MC
8213 }
8214 }
bbf33d1d 8215 bp->pri2cos_valid = true;
e37fed79 8216 }
bbf33d1d
EP
8217 hwrm_req_drop(bp, req_qc);
8218
36e53349 8219 return rc;
00db3cba
VV
8220}
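/* bnxt_hwrm_port_qstats_ext() above records the extended stats sizes the
 * firmware actually returned (in 8-byte counters) and, when called without
 * flags, also refreshes the priority-to-CoS map: per-port queue IDs start
 * at 0, 10, 20, ... so queue_id % 10 gives the per-port index, and any
 * out-of-range index clears bp->pri2cos_valid.
 */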
8221
c0c050c5
MC
8222static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8223{
442a35a5 8224 if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
c0c050c5
MC
8225 bnxt_hwrm_tunnel_dst_port_free(
8226 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
442a35a5 8227 if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
c0c050c5
MC
8228 bnxt_hwrm_tunnel_dst_port_free(
8229 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
c0c050c5
MC
8230}
8231
8232static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8233{
8234 int rc, i;
8235 u32 tpa_flags = 0;
8236
8237 if (set_tpa)
8238 tpa_flags = bp->flags & BNXT_FLAG_TPA;
b340dc68 8239 else if (BNXT_NO_FW_ACCESS(bp))
b4fff207 8240 return 0;
c0c050c5
MC
8241 for (i = 0; i < bp->nr_vnics; i++) {
8242 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8243 if (rc) {
8244 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
23e12c89 8245 i, rc);
c0c050c5
MC
8246 return rc;
8247 }
8248 }
8249 return 0;
8250}
8251
8252static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8253{
8254 int i;
8255
8256 for (i = 0; i < bp->nr_vnics; i++)
8257 bnxt_hwrm_vnic_set_rss(bp, i, false);
8258}
8259
a46ecb11 8260static void bnxt_clear_vnic(struct bnxt *bp)
c0c050c5 8261{
a46ecb11
MC
8262 if (!bp->vnic_info)
8263 return;
8264
8265 bnxt_hwrm_clear_vnic_filter(bp);
8266 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
c0c050c5
MC
8267		/* clear all RSS settings before freeing the vnic ctx */
8268 bnxt_hwrm_clear_vnic_rss(bp);
8269 bnxt_hwrm_vnic_ctx_free(bp);
c0c050c5 8270 }
a46ecb11
MC
8271	/* before freeing the vnic, undo the vnic tpa settings */
8272 if (bp->flags & BNXT_FLAG_TPA)
8273 bnxt_set_tpa(bp, false);
8274 bnxt_hwrm_vnic_free(bp);
8275 if (bp->flags & BNXT_FLAG_CHIP_P5)
8276 bnxt_hwrm_vnic_ctx_free(bp);
8277}
8278
8279static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8280 bool irq_re_init)
8281{
8282 bnxt_clear_vnic(bp);
c0c050c5
MC
8283 bnxt_hwrm_ring_free(bp, close_path);
8284 bnxt_hwrm_ring_grp_free(bp);
8285 if (irq_re_init) {
8286 bnxt_hwrm_stat_ctx_free(bp);
8287 bnxt_hwrm_free_tunnel_ports(bp);
8288 }
8289}
8290
39d8ba2e
MC
8291static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8292{
bbf33d1d
EP
8293 struct hwrm_func_cfg_input *req;
8294 u8 evb_mode;
8295 int rc;
39d8ba2e 8296
39d8ba2e 8297 if (br_mode == BRIDGE_MODE_VEB)
bbf33d1d 8298 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
39d8ba2e 8299 else if (br_mode == BRIDGE_MODE_VEPA)
bbf33d1d 8300 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
39d8ba2e
MC
8301 else
8302 return -EINVAL;
bbf33d1d
EP
8303
8304 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8305 if (rc)
8306 return rc;
8307
8308 req->fid = cpu_to_le16(0xffff);
8309 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8310 req->evb_mode = evb_mode;
8311 return hwrm_req_send(bp, req);
39d8ba2e
MC
8312}
8313
c3480a60
MC
8314static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8315{
bbf33d1d
EP
8316 struct hwrm_func_cfg_input *req;
8317 int rc;
c3480a60
MC
8318
8319 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8320 return 0;
8321
bbf33d1d
EP
8322 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8323 if (rc)
8324 return rc;
8325
8326 req->fid = cpu_to_le16(0xffff);
8327 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8328 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
c3480a60 8329 if (size == 128)
bbf33d1d 8330 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
c3480a60 8331
bbf33d1d 8332 return hwrm_req_send(bp, req);
c3480a60
MC
8333}
8334
7b3af4f7 8335static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
c0c050c5 8336{
ae10ae74 8337 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
c0c050c5
MC
8338 int rc;
8339
ae10ae74
MC
8340 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8341 goto skip_rss_ctx;
8342
c0c050c5 8343 /* allocate context for vnic */
94ce9caa 8344 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
c0c050c5
MC
8345 if (rc) {
8346 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8347 vnic_id, rc);
8348 goto vnic_setup_err;
8349 }
8350 bp->rsscos_nr_ctxs++;
8351
94ce9caa
PS
8352 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8353 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8354 if (rc) {
8355 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8356 vnic_id, rc);
8357 goto vnic_setup_err;
8358 }
8359 bp->rsscos_nr_ctxs++;
8360 }
8361
ae10ae74 8362skip_rss_ctx:
c0c050c5
MC
8363 /* configure default vnic, ring grp */
8364 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8365 if (rc) {
8366 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8367 vnic_id, rc);
8368 goto vnic_setup_err;
8369 }
8370
8371 /* Enable RSS hashing on vnic */
8372 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8373 if (rc) {
8374 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8375 vnic_id, rc);
8376 goto vnic_setup_err;
8377 }
8378
8379 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8380 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8381 if (rc) {
8382 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8383 vnic_id, rc);
8384 }
8385 }
8386
8387vnic_setup_err:
8388 return rc;
8389}
8390
7b3af4f7
MC
8391static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8392{
8393 int rc, i, nr_ctxs;
8394
f9f6a3fb 8395 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
7b3af4f7
MC
8396 for (i = 0; i < nr_ctxs; i++) {
8397 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8398 if (rc) {
8399 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8400 vnic_id, i, rc);
8401 break;
8402 }
8403 bp->rsscos_nr_ctxs++;
8404 }
8405 if (i < nr_ctxs)
8406 return -ENOMEM;
8407
8408 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8409 if (rc) {
8410 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8411 vnic_id, rc);
8412 return rc;
8413 }
8414 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8415 if (rc) {
8416 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8417 vnic_id, rc);
8418 return rc;
8419 }
8420 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8421 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8422 if (rc) {
8423 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8424 vnic_id, rc);
8425 }
8426 }
8427 return rc;
8428}
8429
8430static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8431{
8432 if (bp->flags & BNXT_FLAG_CHIP_P5)
8433 return __bnxt_setup_vnic_p5(bp, vnic_id);
8434 else
8435 return __bnxt_setup_vnic(bp, vnic_id);
8436}
8437
c0c050c5
MC
8438static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8439{
8440#ifdef CONFIG_RFS_ACCEL
8441 int i, rc = 0;
8442
9b3d15e6
MC
8443 if (bp->flags & BNXT_FLAG_CHIP_P5)
8444 return 0;
8445
c0c050c5 8446 for (i = 0; i < bp->rx_nr_rings; i++) {
ae10ae74 8447 struct bnxt_vnic_info *vnic;
c0c050c5
MC
8448 u16 vnic_id = i + 1;
8449 u16 ring_id = i;
8450
8451 if (vnic_id >= bp->nr_vnics)
8452 break;
8453
ae10ae74
MC
8454 vnic = &bp->vnic_info[vnic_id];
8455 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8456 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8457 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
b81a90d3 8458 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
c0c050c5
MC
8459 if (rc) {
8460 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8461 vnic_id, rc);
8462 break;
8463 }
8464 rc = bnxt_setup_vnic(bp, vnic_id);
8465 if (rc)
8466 break;
8467 }
8468 return rc;
8469#else
8470 return 0;
8471#endif
8472}
8473
dd85fc0a 8474/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
17c71ac3
MC
8475static bool bnxt_promisc_ok(struct bnxt *bp)
8476{
8477#ifdef CONFIG_BNXT_SRIOV
dd85fc0a 8478 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
17c71ac3
MC
8479 return false;
8480#endif
8481 return true;
8482}
8483
dc52c6c7
PS
8484static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8485{
8486	int rc = 0;
8487
8488 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8489 if (rc) {
8490 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8491 rc);
8492 return rc;
8493 }
8494
8495 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8496 if (rc) {
8497 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8498 rc);
8499 return rc;
8500 }
8501 return rc;
8502}
8503
b664f008 8504static int bnxt_cfg_rx_mode(struct bnxt *);
7d2837dd 8505static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
b664f008 8506
c0c050c5
MC
8507static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8508{
7d2837dd 8509 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
c0c050c5 8510 int rc = 0;
76595193 8511 unsigned int rx_nr_rings = bp->rx_nr_rings;
c0c050c5
MC
8512
8513 if (irq_re_init) {
8514 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8515 if (rc) {
8516 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8517 rc);
8518 goto err_out;
8519 }
8520 }
8521
8522 rc = bnxt_hwrm_ring_alloc(bp);
8523 if (rc) {
8524 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8525 goto err_out;
8526 }
8527
8528 rc = bnxt_hwrm_ring_grp_alloc(bp);
8529 if (rc) {
8530 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8531 goto err_out;
8532 }
8533
76595193
PS
8534 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8535 rx_nr_rings--;
8536
c0c050c5 8537 /* default vnic 0 */
76595193 8538 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
c0c050c5
MC
8539 if (rc) {
8540 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8541 goto err_out;
8542 }
8543
8544 rc = bnxt_setup_vnic(bp, 0);
8545 if (rc)
8546 goto err_out;
8547
8548 if (bp->flags & BNXT_FLAG_RFS) {
8549 rc = bnxt_alloc_rfs_vnics(bp);
8550 if (rc)
8551 goto err_out;
8552 }
8553
8554 if (bp->flags & BNXT_FLAG_TPA) {
8555 rc = bnxt_set_tpa(bp, true);
8556 if (rc)
8557 goto err_out;
8558 }
8559
8560 if (BNXT_VF(bp))
8561 bnxt_update_vf_mac(bp);
8562
8563 /* Filter for default vnic 0 */
8564 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8565 if (rc) {
8566 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8567 goto err_out;
8568 }
7d2837dd 8569 vnic->uc_filter_count = 1;
c0c050c5 8570
30e33848
MC
8571 vnic->rx_mask = 0;
8572 if (bp->dev->flags & IFF_BROADCAST)
8573 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5 8574
dd85fc0a 8575 if (bp->dev->flags & IFF_PROMISC)
7d2837dd
MC
8576 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8577
8578 if (bp->dev->flags & IFF_ALLMULTI) {
8579 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8580 vnic->mc_list_count = 0;
8581 } else {
8582 u32 mask = 0;
8583
8584 bnxt_mc_list_updated(bp, &mask);
8585 vnic->rx_mask |= mask;
8586 }
c0c050c5 8587
b664f008
MC
8588 rc = bnxt_cfg_rx_mode(bp);
8589 if (rc)
c0c050c5 8590 goto err_out;
c0c050c5
MC
8591
8592 rc = bnxt_hwrm_set_coal(bp);
8593 if (rc)
8594 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8595 rc);
8596
8597 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8598 rc = bnxt_setup_nitroa0_vnic(bp);
8599 if (rc)
8600 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8601 rc);
8602 }
c0c050c5 8603
cf6645f8
MC
8604 if (BNXT_VF(bp)) {
8605 bnxt_hwrm_func_qcfg(bp);
8606 netdev_update_features(bp->dev);
8607 }
8608
c0c050c5
MC
8609 return 0;
8610
8611err_out:
8612 bnxt_hwrm_resource_free(bp, 0, true);
8613
8614 return rc;
8615}
8616
8617static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8618{
8619 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8620 return 0;
8621}
8622
8623static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8624{
2247925f 8625 bnxt_init_cp_rings(bp);
c0c050c5
MC
8626 bnxt_init_rx_rings(bp);
8627 bnxt_init_tx_rings(bp);
8628 bnxt_init_ring_grps(bp, irq_re_init);
8629 bnxt_init_vnics(bp);
8630
8631 return bnxt_init_chip(bp, irq_re_init);
8632}
8633
c0c050c5
MC
8634static int bnxt_set_real_num_queues(struct bnxt *bp)
8635{
8636 int rc;
8637 struct net_device *dev = bp->dev;
8638
5f449249
MC
8639 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8640 bp->tx_nr_rings_xdp);
c0c050c5
MC
8641 if (rc)
8642 return rc;
8643
8644 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8645 if (rc)
8646 return rc;
8647
8648#ifdef CONFIG_RFS_ACCEL
45019a18 8649 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 8650 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
8651#endif
8652
8653 return rc;
8654}
8655
6e6c5a57
MC
8656static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8657 bool shared)
8658{
8659 int _rx = *rx, _tx = *tx;
8660
8661 if (shared) {
8662 *rx = min_t(int, _rx, max);
8663 *tx = min_t(int, _tx, max);
8664 } else {
8665 if (max < 2)
8666 return -ENOMEM;
8667
8668 while (_rx + _tx > max) {
8669 if (_rx > _tx && _rx > 1)
8670 _rx--;
8671 else if (_tx > 1)
8672 _tx--;
8673 }
8674 *rx = _rx;
8675 *tx = _tx;
8676 }
8677 return 0;
8678}
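/* Worked example (editor's illustration, values assumed): with shared set
 * to false, max == 8 and *rx == *tx == 6, the loop above decrements tx and
 * rx alternately until the sum fits, leaving *rx == 4 and *tx == 4.  With
 * shared set to true the same inputs are only clamped to max, so *rx and
 * *tx both stay at 6.
 */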
8679
7809592d
MC
8680static void bnxt_setup_msix(struct bnxt *bp)
8681{
8682 const int len = sizeof(bp->irq_tbl[0].name);
8683 struct net_device *dev = bp->dev;
8684 int tcs, i;
8685
8686 tcs = netdev_get_num_tc(dev);
18e4960c 8687 if (tcs) {
d1e7925e 8688 int i, off, count;
7809592d 8689
d1e7925e
MC
8690 for (i = 0; i < tcs; i++) {
8691 count = bp->tx_nr_rings_per_tc;
8692 off = i * count;
8693 netdev_set_tc_queue(dev, i, count, off);
7809592d
MC
8694 }
8695 }
8696
8697 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c 8698 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7809592d
MC
8699 char *attr;
8700
8701 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8702 attr = "TxRx";
8703 else if (i < bp->rx_nr_rings)
8704 attr = "rx";
8705 else
8706 attr = "tx";
8707
e5811b8c
MC
8708 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8709 attr, i);
8710 bp->irq_tbl[map_idx].handler = bnxt_msix;
7809592d
MC
8711 }
8712}
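/* Editor's note (illustrative): the names built above look like
 * "eth0-TxRx-0" when rings are shared, or "eth0-rx-3" / "eth0-tx-5"
 * otherwise, and these are the strings that later appear in
 * /proc/interrupts once request_irq() registers the vectors.
 */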
8713
8714static void bnxt_setup_inta(struct bnxt *bp)
8715{
8716 const int len = sizeof(bp->irq_tbl[0].name);
8717
8718 if (netdev_get_num_tc(bp->dev))
8719 netdev_reset_tc(bp->dev);
8720
8721 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8722 0);
8723 bp->irq_tbl[0].handler = bnxt_inta;
8724}
8725
20d7d1c5
EP
8726static int bnxt_init_int_mode(struct bnxt *bp);
8727
7809592d
MC
8728static int bnxt_setup_int_mode(struct bnxt *bp)
8729{
8730 int rc;
8731
20d7d1c5
EP
8732 if (!bp->irq_tbl) {
8733 rc = bnxt_init_int_mode(bp);
8734 if (rc || !bp->irq_tbl)
8735 return rc ?: -ENODEV;
8736 }
8737
7809592d
MC
8738 if (bp->flags & BNXT_FLAG_USING_MSIX)
8739 bnxt_setup_msix(bp);
8740 else
8741 bnxt_setup_inta(bp);
8742
8743 rc = bnxt_set_real_num_queues(bp);
8744 return rc;
8745}
8746
b7429954 8747#ifdef CONFIG_RFS_ACCEL
8079e8f1
MC
8748static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8749{
6a4f2947 8750 return bp->hw_resc.max_rsscos_ctxs;
8079e8f1
MC
8751}
8752
8753static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8754{
6a4f2947 8755 return bp->hw_resc.max_vnics;
8079e8f1 8756}
b7429954 8757#endif
8079e8f1 8758
e4060d30
MC
8759unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8760{
6a4f2947 8761 return bp->hw_resc.max_stat_ctxs;
e4060d30
MC
8762}
8763
8764unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8765{
6a4f2947 8766 return bp->hw_resc.max_cp_rings;
e4060d30
MC
8767}
8768
e916b081 8769static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
a588e458 8770{
c0b8cda0
MC
8771 unsigned int cp = bp->hw_resc.max_cp_rings;
8772
8773 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8774 cp -= bnxt_get_ulp_msix_num(bp);
8775
8776 return cp;
a588e458
MC
8777}
8778
ad95c27b 8779static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7809592d 8780{
6a4f2947
MC
8781 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8782
f7588cd8
MC
8783 if (bp->flags & BNXT_FLAG_CHIP_P5)
8784 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8785
6a4f2947 8786 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7809592d
MC
8787}
8788
30f52947 8789static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
33c2657e 8790{
6a4f2947 8791 bp->hw_resc.max_irqs = max_irqs;
33c2657e
MC
8792}
8793
e916b081
MC
8794unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8795{
8796 unsigned int cp;
8797
8798 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8799 if (bp->flags & BNXT_FLAG_CHIP_P5)
8800 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8801 else
8802 return cp - bp->cp_nr_rings;
8803}
8804
c027c6b4
VV
8805unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8806{
d77b1ad8 8807 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
c027c6b4
VV
8808}
8809
fbcfc8e4
MC
8810int bnxt_get_avail_msix(struct bnxt *bp, int num)
8811{
8812 int max_cp = bnxt_get_max_func_cp_rings(bp);
8813 int max_irq = bnxt_get_max_func_irqs(bp);
8814 int total_req = bp->cp_nr_rings + num;
8815 int max_idx, avail_msix;
8816
75720e63
MC
8817 max_idx = bp->total_irqs;
8818 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8819 max_idx = min_t(int, bp->total_irqs, max_cp);
fbcfc8e4 8820 avail_msix = max_idx - bp->cp_nr_rings;
f1ca94de 8821 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
fbcfc8e4
MC
8822 return avail_msix;
8823
8824 if (max_irq < total_req) {
8825 num = max_irq - bp->cp_nr_rings;
8826 if (num <= 0)
8827 return 0;
8828 }
8829 return num;
8830}
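/* Worked example (editor's illustration, values assumed): on a P5 device
 * with bp->total_irqs == 16 and bp->cp_nr_rings == 12, avail_msix is 4, so
 * a request for num == 8 returns 4 on a function without the new resource
 * manager.  With BNXT_NEW_RM the full 8 can be returned as long as max_irq
 * covers cp_nr_rings + 8; otherwise the request is clipped to
 * max_irq - cp_nr_rings (or 0 if nothing is left).
 */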
8831
08654eb2
MC
8832static int bnxt_get_num_msix(struct bnxt *bp)
8833{
f1ca94de 8834 if (!BNXT_NEW_RM(bp))
08654eb2
MC
8835 return bnxt_get_max_func_irqs(bp);
8836
c0b8cda0 8837 return bnxt_nq_rings_in_use(bp);
08654eb2
MC
8838}
8839
7809592d 8840static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 8841{
fbcfc8e4 8842 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7809592d 8843 struct msix_entry *msix_ent;
c0c050c5 8844
08654eb2
MC
8845 total_vecs = bnxt_get_num_msix(bp);
8846 max = bnxt_get_max_func_irqs(bp);
8847 if (total_vecs > max)
8848 total_vecs = max;
8849
2773dfb2
MC
8850 if (!total_vecs)
8851 return 0;
8852
c0c050c5
MC
8853 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8854 if (!msix_ent)
8855 return -ENOMEM;
8856
8857 for (i = 0; i < total_vecs; i++) {
8858 msix_ent[i].entry = i;
8859 msix_ent[i].vector = 0;
8860 }
8861
01657bcd
MC
8862 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8863 min = 2;
8864
8865 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
fbcfc8e4
MC
8866 ulp_msix = bnxt_get_ulp_msix_num(bp);
8867 if (total_vecs < 0 || total_vecs < ulp_msix) {
c0c050c5
MC
8868 rc = -ENODEV;
8869 goto msix_setup_exit;
8870 }
8871
8872 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8873 if (bp->irq_tbl) {
7809592d
MC
8874 for (i = 0; i < total_vecs; i++)
8875 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 8876
7809592d 8877 bp->total_irqs = total_vecs;
c0c050c5 8878 /* Trim rings based upon num of vectors allocated */
6e6c5a57 8879 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
fbcfc8e4 8880 total_vecs - ulp_msix, min == 1);
6e6c5a57
MC
8881 if (rc)
8882 goto msix_setup_exit;
8883
7809592d
MC
8884 bp->cp_nr_rings = (min == 1) ?
8885 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8886 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 8887
c0c050c5
MC
8888 } else {
8889 rc = -ENOMEM;
8890 goto msix_setup_exit;
8891 }
8892 bp->flags |= BNXT_FLAG_USING_MSIX;
8893 kfree(msix_ent);
8894 return 0;
8895
8896msix_setup_exit:
7809592d
MC
8897 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8898 kfree(bp->irq_tbl);
8899 bp->irq_tbl = NULL;
c0c050c5
MC
8900 pci_disable_msix(bp->pdev);
8901 kfree(msix_ent);
8902 return rc;
8903}
8904
7809592d 8905static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 8906{
33dbcf60 8907 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
8908 if (!bp->irq_tbl)
8909 return -ENOMEM;
8910
8911 bp->total_irqs = 1;
c0c050c5
MC
8912 bp->rx_nr_rings = 1;
8913 bp->tx_nr_rings = 1;
8914 bp->cp_nr_rings = 1;
01657bcd 8915 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 8916 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 8917 return 0;
c0c050c5
MC
8918}
8919
7809592d 8920static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5 8921{
20d7d1c5 8922 int rc = -ENODEV;
c0c050c5
MC
8923
8924 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 8925 rc = bnxt_init_msix(bp);
c0c050c5 8926
1fa72e29 8927 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 8928 /* fallback to INTA */
7809592d 8929 rc = bnxt_init_inta(bp);
c0c050c5
MC
8930 }
8931 return rc;
8932}
8933
7809592d
MC
8934static void bnxt_clear_int_mode(struct bnxt *bp)
8935{
8936 if (bp->flags & BNXT_FLAG_USING_MSIX)
8937 pci_disable_msix(bp->pdev);
8938
8939 kfree(bp->irq_tbl);
8940 bp->irq_tbl = NULL;
8941 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8942}
8943
1b3f0b75 8944int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
674f50a5 8945{
674f50a5 8946 int tcs = netdev_get_num_tc(bp->dev);
1b3f0b75 8947 bool irq_cleared = false;
674f50a5
MC
8948 int rc;
8949
8950 if (!bnxt_need_reserve_rings(bp))
8951 return 0;
8952
1b3f0b75
MC
8953 if (irq_re_init && BNXT_NEW_RM(bp) &&
8954 bnxt_get_num_msix(bp) != bp->total_irqs) {
ec86f14e 8955 bnxt_ulp_irq_stop(bp);
674f50a5 8956 bnxt_clear_int_mode(bp);
1b3f0b75 8957 irq_cleared = true;
36d65be9
MC
8958 }
8959 rc = __bnxt_reserve_rings(bp);
1b3f0b75 8960 if (irq_cleared) {
36d65be9
MC
8961 if (!rc)
8962 rc = bnxt_init_int_mode(bp);
ec86f14e 8963 bnxt_ulp_irq_restart(bp, rc);
36d65be9
MC
8964 }
8965 if (rc) {
8966 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8967 return rc;
674f50a5
MC
8968 }
8969 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8970 netdev_err(bp->dev, "tx ring reservation failure\n");
8971 netdev_reset_tc(bp->dev);
8972 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8973 return -ENOMEM;
8974 }
674f50a5
MC
8975 return 0;
8976}
8977
c0c050c5
MC
8978static void bnxt_free_irq(struct bnxt *bp)
8979{
8980 struct bnxt_irq *irq;
8981 int i;
8982
8983#ifdef CONFIG_RFS_ACCEL
8984 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8985 bp->dev->rx_cpu_rmap = NULL;
8986#endif
cb98526b 8987 if (!bp->irq_tbl || !bp->bnapi)
c0c050c5
MC
8988 return;
8989
8990 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
8991 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8992
8993 irq = &bp->irq_tbl[map_idx];
56f0fd80
VV
8994 if (irq->requested) {
8995 if (irq->have_cpumask) {
8996 irq_set_affinity_hint(irq->vector, NULL);
8997 free_cpumask_var(irq->cpu_mask);
8998 irq->have_cpumask = 0;
8999 }
c0c050c5 9000 free_irq(irq->vector, bp->bnapi[i]);
56f0fd80
VV
9001 }
9002
c0c050c5
MC
9003 irq->requested = 0;
9004 }
c0c050c5
MC
9005}
9006
9007static int bnxt_request_irq(struct bnxt *bp)
9008{
b81a90d3 9009 int i, j, rc = 0;
c0c050c5
MC
9010 unsigned long flags = 0;
9011#ifdef CONFIG_RFS_ACCEL
e5811b8c 9012 struct cpu_rmap *rmap;
c0c050c5
MC
9013#endif
9014
e5811b8c
MC
9015 rc = bnxt_setup_int_mode(bp);
9016 if (rc) {
9017 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9018 rc);
9019 return rc;
9020 }
9021#ifdef CONFIG_RFS_ACCEL
9022 rmap = bp->dev->rx_cpu_rmap;
9023#endif
c0c050c5
MC
9024 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9025 flags = IRQF_SHARED;
9026
b81a90d3 9027 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
9028 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9029 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9030
c0c050c5 9031#ifdef CONFIG_RFS_ACCEL
b81a90d3 9032 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
9033 rc = irq_cpu_rmap_add(rmap, irq->vector);
9034 if (rc)
9035 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9036 j);
9037 j++;
c0c050c5
MC
9038 }
9039#endif
9040 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9041 bp->bnapi[i]);
9042 if (rc)
9043 break;
9044
9045 irq->requested = 1;
56f0fd80
VV
9046
9047 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9048 int numa_node = dev_to_node(&bp->pdev->dev);
9049
9050 irq->have_cpumask = 1;
9051 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9052 irq->cpu_mask);
9053 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9054 if (rc) {
9055 netdev_warn(bp->dev,
9056 "Set affinity failed, IRQ = %d\n",
9057 irq->vector);
9058 break;
9059 }
9060 }
c0c050c5
MC
9061 }
9062 return rc;
9063}
9064
9065static void bnxt_del_napi(struct bnxt *bp)
9066{
9067 int i;
9068
9069 if (!bp->bnapi)
9070 return;
9071
9072 for (i = 0; i < bp->cp_nr_rings; i++) {
9073 struct bnxt_napi *bnapi = bp->bnapi[i];
9074
5198d545 9075 __netif_napi_del(&bnapi->napi);
c0c050c5 9076 }
5198d545 9077 /* We called __netif_napi_del(), we need
9078 * to respect an RCU grace period before freeing napi structures.
9079 */
9080 synchronize_net();
c0c050c5
MC
9081}
9082
9083static void bnxt_init_napi(struct bnxt *bp)
9084{
9085 int i;
10bbdaf5 9086 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
9087 struct bnxt_napi *bnapi;
9088
9089 if (bp->flags & BNXT_FLAG_USING_MSIX) {
0fcec985
MC
9090 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9091
9092 if (bp->flags & BNXT_FLAG_CHIP_P5)
9093 poll_fn = bnxt_poll_p5;
9094 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10bbdaf5
PS
9095 cp_nr_rings--;
9096 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5 9097 bnapi = bp->bnapi[i];
0fcec985 9098 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
c0c050c5 9099 }
10bbdaf5
PS
9100 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9101 bnapi = bp->bnapi[cp_nr_rings];
9102 netif_napi_add(bp->dev, &bnapi->napi,
9103 bnxt_poll_nitroa0, 64);
10bbdaf5 9104 }
c0c050c5
MC
9105 } else {
9106 bnapi = bp->bnapi[0];
9107 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
c0c050c5
MC
9108 }
9109}
9110
9111static void bnxt_disable_napi(struct bnxt *bp)
9112{
9113 int i;
9114
e340a5c4
MC
9115 if (!bp->bnapi ||
9116 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
c0c050c5
MC
9117 return;
9118
0bc0b97f
AG
9119 for (i = 0; i < bp->cp_nr_rings; i++) {
9120 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9121
01cca6b9 9122 napi_disable(&bp->bnapi[i]->napi);
0bc0b97f
AG
9123 if (bp->bnapi[i]->rx_ring)
9124 cancel_work_sync(&cpr->dim.work);
0bc0b97f 9125 }
c0c050c5
MC
9126}
9127
9128static void bnxt_enable_napi(struct bnxt *bp)
9129{
9130 int i;
9131
e340a5c4 9132 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
c0c050c5 9133 for (i = 0; i < bp->cp_nr_rings; i++) {
8a27d4b9
MC
9134 struct bnxt_napi *bnapi = bp->bnapi[i];
9135 struct bnxt_cp_ring_info *cpr;
9136
9137 cpr = &bnapi->cp_ring;
9138 if (bnapi->in_reset)
9139 cpr->sw_stats.rx.rx_resets++;
9140 bnapi->in_reset = false;
6a8788f2 9141
8a27d4b9 9142 if (bnapi->rx_ring) {
6a8788f2 9143 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
c002bd52 9144 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6a8788f2 9145 }
8a27d4b9 9146 napi_enable(&bnapi->napi);
c0c050c5
MC
9147 }
9148}
9149
7df4ae9f 9150void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
9151{
9152 int i;
c0c050c5 9153 struct bnxt_tx_ring_info *txr;
c0c050c5 9154
b6ab4b01 9155 if (bp->tx_ring) {
c0c050c5 9156 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 9157 txr = &bp->tx_ring[i];
3c603136 9158 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
c0c050c5
MC
9159 }
9160 }
3c603136
JK
9161 /* Make sure napi polls see @dev_state change */
9162 synchronize_net();
132e0b65
EP
9163 /* Drop carrier first to prevent TX timeout */
9164 netif_carrier_off(bp->dev);
c0c050c5
MC
9165 /* Stop all TX queues */
9166 netif_tx_disable(bp->dev);
c0c050c5
MC
9167}
9168
7df4ae9f 9169void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
9170{
9171 int i;
c0c050c5 9172 struct bnxt_tx_ring_info *txr;
c0c050c5
MC
9173
9174 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 9175 txr = &bp->tx_ring[i];
3c603136 9176 WRITE_ONCE(txr->dev_state, 0);
c0c050c5 9177 }
3c603136
JK
9178 /* Make sure napi polls see @dev_state change */
9179 synchronize_net();
c0c050c5
MC
9180 netif_tx_wake_all_queues(bp->dev);
9181 if (bp->link_info.link_up)
9182 netif_carrier_on(bp->dev);
9183}
9184
2046e3c3
MC
9185static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9186{
9187 u8 active_fec = link_info->active_fec_sig_mode &
9188 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9189
9190 switch (active_fec) {
9191 default:
9192 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9193 return "None";
9194 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9195 return "Clause 74 BaseR";
9196 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9197 return "Clause 91 RS(528,514)";
9198 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9199 return "Clause 91 RS544_1XN";
9200 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9201 return "Clause 91 RS(544,514)";
9202 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9203 return "Clause 91 RS272_1XN";
9204 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9205 return "Clause 91 RS(272,257)";
9206 }
9207}
9208
c0c050c5
MC
9209static void bnxt_report_link(struct bnxt *bp)
9210{
9211 if (bp->link_info.link_up) {
1d2deb61 9212 const char *signal = "";
c0c050c5 9213 const char *flow_ctrl;
1d2deb61 9214 const char *duplex;
38a21b34
DK
9215 u32 speed;
9216 u16 fec;
c0c050c5
MC
9217
9218 netif_carrier_on(bp->dev);
8eddb3e7
MC
9219 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9220 if (speed == SPEED_UNKNOWN) {
9221 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9222 return;
9223 }
c0c050c5
MC
9224 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9225 duplex = "full";
9226 else
9227 duplex = "half";
9228 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9229 flow_ctrl = "ON - receive & transmit";
9230 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9231 flow_ctrl = "ON - transmit";
9232 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9233 flow_ctrl = "ON - receive";
9234 else
9235 flow_ctrl = "none";
1d2deb61
EP
9236 if (bp->link_info.phy_qcfg_resp.option_flags &
9237 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9238 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9239 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9240 switch (sig_mode) {
9241 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9242 signal = "(NRZ) ";
9243 break;
9244 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9245 signal = "(PAM4) ";
9246 break;
9247 default:
9248 break;
9249 }
9250 }
9251 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9252 speed, signal, duplex, flow_ctrl);
b0d28207 9253 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9254 netdev_info(bp->dev, "EEE is %s\n",
9255 bp->eee.eee_active ? "active" :
9256 "not active");
e70c752f
MC
9257 fec = bp->link_info.fec_cfg;
9258 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
2046e3c3 9259 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
e70c752f 9260 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
2046e3c3 9261 bnxt_report_fec(&bp->link_info));
c0c050c5
MC
9262 } else {
9263 netif_carrier_off(bp->dev);
9264 netdev_err(bp->dev, "NIC Link is Down\n");
9265 }
9266}
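/* Editor's note (sample output, assumed values): a 25G link with both-way
 * flow control would typically log
 *	"NIC Link is Up, 25000 Mbps (NRZ) full duplex, Flow control: ON - receive & transmit"
 * followed by the EEE and FEC lines when the PHY reports those capabilities.
 */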
9267
3128e811
MC
9268static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9269{
9270 if (!resp->supported_speeds_auto_mode &&
9271 !resp->supported_speeds_force_mode &&
9272 !resp->supported_pam4_speeds_auto_mode &&
9273 !resp->supported_pam4_speeds_force_mode)
9274 return true;
9275 return false;
9276}
9277
170ce013
MC
9278static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9279{
93ed8117 9280 struct bnxt_link_info *link_info = &bp->link_info;
bbf33d1d
EP
9281 struct hwrm_port_phy_qcaps_output *resp;
9282 struct hwrm_port_phy_qcaps_input *req;
9283 int rc = 0;
170ce013
MC
9284
9285 if (bp->hwrm_spec_code < 0x10201)
9286 return 0;
9287
bbf33d1d
EP
9288 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9289 if (rc)
9290 return rc;
170ce013 9291
bbf33d1d
EP
9292 resp = hwrm_req_hold(bp, req);
9293 rc = hwrm_req_send(bp, req);
170ce013
MC
9294 if (rc)
9295 goto hwrm_phy_qcaps_exit;
9296
b0d28207 9297 bp->phy_flags = resp->flags;
acb20054 9298 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
170ce013
MC
9299 struct ethtool_eee *eee = &bp->eee;
9300 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9301
170ce013
MC
9302 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9303 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9304 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9305 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9306 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9307 }
fea6b333 9308
3128e811
MC
9309 if (bp->hwrm_spec_code >= 0x10a01) {
9310 if (bnxt_phy_qcaps_no_speed(resp)) {
9311 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9312 netdev_warn(bp->dev, "Ethernet link disabled\n");
9313 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9314 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9315 netdev_info(bp->dev, "Ethernet link enabled\n");
9316 /* Phy re-enabled, reprobe the speeds */
9317 link_info->support_auto_speeds = 0;
9318 link_info->support_pam4_auto_speeds = 0;
9319 }
9320 }
520ad89a
MC
9321 if (resp->supported_speeds_auto_mode)
9322 link_info->support_auto_speeds =
9323 le16_to_cpu(resp->supported_speeds_auto_mode);
d058426e
EP
9324 if (resp->supported_pam4_speeds_auto_mode)
9325 link_info->support_pam4_auto_speeds =
9326 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
170ce013 9327
d5430d31
MC
9328 bp->port_count = resp->port_cnt;
9329
170ce013 9330hwrm_phy_qcaps_exit:
bbf33d1d 9331 hwrm_req_drop(bp, req);
170ce013
MC
9332 return rc;
9333}
9334
c916062a
EP
9335static bool bnxt_support_dropped(u16 advertising, u16 supported)
9336{
9337 u16 diff = advertising ^ supported;
9338
9339 return ((supported | diff) != supported);
9340}
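/* Worked example (editor's illustration): advertising == 0x0006 and
 * supported == 0x0002 gives diff == 0x0004, so (supported | diff) ==
 * 0x0006 != supported and the helper returns true -- a previously
 * advertised speed is no longer supported.  When advertising is a subset
 * of supported, diff adds no new bits and the helper returns false.
 */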
9341
ccd6a9dc 9342int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
c0c050c5 9343{
c0c050c5 9344 struct bnxt_link_info *link_info = &bp->link_info;
bbf33d1d
EP
9345 struct hwrm_port_phy_qcfg_output *resp;
9346 struct hwrm_port_phy_qcfg_input *req;
c0c050c5 9347 u8 link_up = link_info->link_up;
d058426e 9348 bool support_changed = false;
bbf33d1d 9349 int rc;
c0c050c5 9350
bbf33d1d
EP
9351 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9352 if (rc)
9353 return rc;
c0c050c5 9354
bbf33d1d
EP
9355 resp = hwrm_req_hold(bp, req);
9356 rc = hwrm_req_send(bp, req);
c0c050c5 9357 if (rc) {
bbf33d1d 9358 hwrm_req_drop(bp, req);
c0c050c5
MC
9359 return rc;
9360 }
9361
9362 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9363 link_info->phy_link_status = resp->link;
acb20054
MC
9364 link_info->duplex = resp->duplex_cfg;
9365 if (bp->hwrm_spec_code >= 0x10800)
9366 link_info->duplex = resp->duplex_state;
c0c050c5
MC
9367 link_info->pause = resp->pause;
9368 link_info->auto_mode = resp->auto_mode;
9369 link_info->auto_pause_setting = resp->auto_pause;
3277360e 9370 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 9371 link_info->force_pause_setting = resp->force_pause;
acb20054 9372 link_info->duplex_setting = resp->duplex_cfg;
c0c050c5
MC
9373 if (link_info->phy_link_status == BNXT_LINK_LINK)
9374 link_info->link_speed = le16_to_cpu(resp->link_speed);
9375 else
9376 link_info->link_speed = 0;
9377 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
d058426e
EP
9378 link_info->force_pam4_link_speed =
9379 le16_to_cpu(resp->force_pam4_link_speed);
c0c050c5 9380 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
d058426e 9381 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
c0c050c5 9382 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
d058426e
EP
9383 link_info->auto_pam4_link_speeds =
9384 le16_to_cpu(resp->auto_pam4_link_speed_mask);
3277360e
MC
9385 link_info->lp_auto_link_speeds =
9386 le16_to_cpu(resp->link_partner_adv_speeds);
d058426e
EP
9387 link_info->lp_auto_pam4_link_speeds =
9388 resp->link_partner_pam4_adv_speeds;
c0c050c5
MC
9389 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9390 link_info->phy_ver[0] = resp->phy_maj;
9391 link_info->phy_ver[1] = resp->phy_min;
9392 link_info->phy_ver[2] = resp->phy_bld;
9393 link_info->media_type = resp->media_type;
03efbec0 9394 link_info->phy_type = resp->phy_type;
11f15ed3 9395 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
9396 link_info->phy_addr = resp->eee_config_phy_addr &
9397 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 9398 link_info->module_status = resp->module_status;
170ce013 9399
b0d28207 9400 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
170ce013
MC
9401 struct ethtool_eee *eee = &bp->eee;
9402 u16 fw_speeds;
9403
9404 eee->eee_active = 0;
9405 if (resp->eee_config_phy_addr &
9406 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9407 eee->eee_active = 1;
9408 fw_speeds = le16_to_cpu(
9409 resp->link_partner_adv_eee_link_speed_mask);
9410 eee->lp_advertised =
9411 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9412 }
9413
9414 /* Pull initial EEE config */
9415 if (!chng_link_state) {
9416 if (resp->eee_config_phy_addr &
9417 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9418 eee->eee_enabled = 1;
c0c050c5 9419
170ce013
MC
9420 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9421 eee->advertised =
9422 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9423
9424 if (resp->eee_config_phy_addr &
9425 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9426 __le32 tmr;
9427
9428 eee->tx_lpi_enabled = 1;
9429 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9430 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9431 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9432 }
9433 }
9434 }
e70c752f
MC
9435
9436 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8b277589 9437 if (bp->hwrm_spec_code >= 0x10504) {
e70c752f 9438 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8b277589
MC
9439 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9440 }
c0c050c5
MC
9441 /* TODO: need to add more logic to report VF link */
9442 if (chng_link_state) {
9443 if (link_info->phy_link_status == BNXT_LINK_LINK)
9444 link_info->link_up = 1;
9445 else
9446 link_info->link_up = 0;
9447 if (link_up != link_info->link_up)
9448 bnxt_report_link(bp);
9449 } else {
9450		/* always link down if not required to update link state */
9451 link_info->link_up = 0;
9452 }
bbf33d1d 9453 hwrm_req_drop(bp, req);
286ef9d6 9454
c7e457f4 9455 if (!BNXT_PHY_CFG_ABLE(bp))
dac04907
MC
9456 return 0;
9457
c916062a
EP
9458 /* Check if any advertised speeds are no longer supported. The caller
9459 * holds the link_lock mutex, so we can modify link_info settings.
9460 */
9461 if (bnxt_support_dropped(link_info->advertising,
9462 link_info->support_auto_speeds)) {
286ef9d6 9463 link_info->advertising = link_info->support_auto_speeds;
d058426e 9464 support_changed = true;
286ef9d6 9465 }
d058426e
EP
9466 if (bnxt_support_dropped(link_info->advertising_pam4,
9467 link_info->support_pam4_auto_speeds)) {
9468 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9469 support_changed = true;
9470 }
9471 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9472 bnxt_hwrm_set_link_setting(bp, true, false);
c0c050c5
MC
9473 return 0;
9474}
9475
10289bec
MC
9476static void bnxt_get_port_module_status(struct bnxt *bp)
9477{
9478 struct bnxt_link_info *link_info = &bp->link_info;
9479 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9480 u8 module_status;
9481
9482 if (bnxt_update_link(bp, true))
9483 return;
9484
9485 module_status = link_info->module_status;
9486 switch (module_status) {
9487 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9488 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9489 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9490 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9491 bp->pf.port_id);
9492 if (bp->hwrm_spec_code >= 0x10201) {
9493 netdev_warn(bp->dev, "Module part number %s\n",
9494 resp->phy_vendor_partnumber);
9495 }
9496 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9497 netdev_warn(bp->dev, "TX is disabled\n");
9498 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9499 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9500 }
9501}
9502
c0c050c5
MC
9503static void
9504bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9505{
9506 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
9507 if (bp->hwrm_spec_code >= 0x10201)
9508 req->auto_pause =
9509 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
9510 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9511 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9512 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 9513 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
9514 req->enables |=
9515 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9516 } else {
9517 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9518 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9519 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9520 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9521 req->enables |=
9522 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
9523 if (bp->hwrm_spec_code >= 0x10201) {
9524 req->auto_pause = req->force_pause;
9525 req->enables |= cpu_to_le32(
9526 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9527 }
c0c050c5
MC
9528 }
9529}
9530
d058426e 9531static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
c0c050c5 9532{
d058426e
EP
9533 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9534 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9535 if (bp->link_info.advertising) {
9536 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9537 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9538 }
9539 if (bp->link_info.advertising_pam4) {
9540 req->enables |=
9541 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9542 req->auto_link_pam4_speed_mask =
9543 cpu_to_le16(bp->link_info.advertising_pam4);
9544 }
c0c050c5 9545 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
d058426e 9546 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
c0c050c5 9547 } else {
c0c050c5 9548 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
d058426e
EP
9549 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9550 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9551 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9552 } else {
9553 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9554 }
c0c050c5
MC
9555 }
9556
c0c050c5
MC
9557 /* tell chimp that the setting takes effect immediately */
9558 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9559}
9560
9561int bnxt_hwrm_set_pause(struct bnxt *bp)
9562{
bbf33d1d 9563 struct hwrm_port_phy_cfg_input *req;
c0c050c5
MC
9564 int rc;
9565
bbf33d1d
EP
9566 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9567 if (rc)
9568 return rc;
9569
9570 bnxt_hwrm_set_pause_common(bp, req);
c0c050c5
MC
9571
9572 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9573 bp->link_info.force_link_chng)
bbf33d1d 9574 bnxt_hwrm_set_link_common(bp, req);
c0c050c5 9575
bbf33d1d 9576 rc = hwrm_req_send(bp, req);
c0c050c5
MC
9577 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9578 /* since changing of pause setting doesn't trigger any link
9579 * change event, the driver needs to update the current pause
9580 * result upon successfully return of the phy_cfg command
9581 */
9582 bp->link_info.pause =
9583 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9584 bp->link_info.auto_pause_setting = 0;
9585 if (!bp->link_info.force_link_chng)
9586 bnxt_report_link(bp);
9587 }
9588 bp->link_info.force_link_chng = false;
c0c050c5
MC
9589 return rc;
9590}
9591
939f7f0c
MC
9592static void bnxt_hwrm_set_eee(struct bnxt *bp,
9593 struct hwrm_port_phy_cfg_input *req)
9594{
9595 struct ethtool_eee *eee = &bp->eee;
9596
9597 if (eee->eee_enabled) {
9598 u16 eee_speeds;
9599 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9600
9601 if (eee->tx_lpi_enabled)
9602 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9603 else
9604 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9605
9606 req->flags |= cpu_to_le32(flags);
9607 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9608 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9609 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9610 } else {
9611 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9612 }
9613}
9614
9615int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5 9616{
bbf33d1d
EP
9617 struct hwrm_port_phy_cfg_input *req;
9618 int rc;
9619
9620 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9621 if (rc)
9622 return rc;
c0c050c5 9623
c0c050c5 9624 if (set_pause)
bbf33d1d 9625 bnxt_hwrm_set_pause_common(bp, req);
c0c050c5 9626
bbf33d1d 9627 bnxt_hwrm_set_link_common(bp, req);
939f7f0c
MC
9628
9629 if (set_eee)
bbf33d1d
EP
9630 bnxt_hwrm_set_eee(bp, req);
9631 return hwrm_req_send(bp, req);
c0c050c5
MC
9632}
9633
33f7d55f
MC
9634static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9635{
bbf33d1d
EP
9636 struct hwrm_port_phy_cfg_input *req;
9637 int rc;
33f7d55f 9638
567b2abe 9639 if (!BNXT_SINGLE_PF(bp))
33f7d55f
MC
9640 return 0;
9641
d5ca9905
MC
9642 if (pci_num_vf(bp->pdev) &&
9643 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
33f7d55f
MC
9644 return 0;
9645
bbf33d1d
EP
9646 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9647 if (rc)
9648 return rc;
9649
9650 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9651 return hwrm_req_send(bp, req);
33f7d55f
MC
9652}
9653
ec5d31e3
MC
9654static int bnxt_fw_init_one(struct bnxt *bp);
9655
b187e4ba
EP
9656static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9657{
9658#ifdef CONFIG_TEE_BNXT_FW
9659 int rc = tee_bnxt_fw_load();
9660
9661 if (rc)
9662 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9663
9664 return rc;
9665#else
9666 netdev_err(bp->dev, "OP-TEE not supported\n");
9667 return -ENODEV;
9668#endif
9669}
9670
9671static int bnxt_try_recover_fw(struct bnxt *bp)
9672{
9673 if (bp->fw_health && bp->fw_health->status_reliable) {
d1cbd165
MC
9674 int retry = 0, rc;
9675 u32 sts;
9676
d1cbd165 9677 do {
d1cbd165 9678 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7b370ad7 9679 rc = bnxt_hwrm_poll(bp);
17e1be34
MC
9680 if (!BNXT_FW_IS_BOOTING(sts) &&
9681 !BNXT_FW_IS_RECOVERING(sts))
d1cbd165
MC
9682 break;
9683 retry++;
9684 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
b187e4ba 9685
d1cbd165
MC
9686 if (!BNXT_FW_IS_HEALTHY(sts)) {
9687 netdev_err(bp->dev,
9688 "Firmware not responding, status: 0x%x\n",
9689 sts);
9690 rc = -ENODEV;
9691 }
b187e4ba
EP
9692 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9693 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9694 return bnxt_fw_reset_via_optee(bp);
9695 }
d1cbd165 9696 return rc;
b187e4ba
EP
9697 }
9698
9699 return -ENODEV;
9700}
9701
25e1acd6
MC
9702static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9703{
bbf33d1d
EP
9704 struct hwrm_func_drv_if_change_output *resp;
9705 struct hwrm_func_drv_if_change_input *req;
20d7d1c5
EP
9706 bool fw_reset = !bp->irq_tbl;
9707 bool resc_reinit = false;
5d06eb5c 9708 int rc, retry = 0;
ec5d31e3 9709 u32 flags = 0;
25e1acd6
MC
9710
9711 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9712 return 0;
9713
bbf33d1d
EP
9714 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
9715 if (rc)
9716 return rc;
9717
25e1acd6 9718 if (up)
bbf33d1d
EP
9719 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9720 resp = hwrm_req_hold(bp, req);
9721
9722 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
5d06eb5c 9723 while (retry < BNXT_FW_IF_RETRY) {
bbf33d1d 9724 rc = hwrm_req_send(bp, req);
5d06eb5c
VV
9725 if (rc != -EAGAIN)
9726 break;
9727
9728 msleep(50);
9729 retry++;
9730 }
5d06eb5c 9731
bbf33d1d
EP
9732 if (rc == -EAGAIN) {
9733 hwrm_req_drop(bp, req);
5d06eb5c 9734 return rc;
bbf33d1d
EP
9735 } else if (!rc) {
9736 flags = le32_to_cpu(resp->flags);
9737 } else if (up) {
b187e4ba
EP
9738 rc = bnxt_try_recover_fw(bp);
9739 fw_reset = true;
9740 }
bbf33d1d 9741 hwrm_req_drop(bp, req);
ec5d31e3
MC
9742 if (rc)
9743 return rc;
25e1acd6 9744
43a440c4
MC
9745 if (!up) {
9746 bnxt_inv_fw_health_reg(bp);
ec5d31e3 9747 return 0;
43a440c4 9748 }
25e1acd6 9749
ec5d31e3
MC
9750 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9751 resc_reinit = true;
9752 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9753 fw_reset = true;
43a440c4
MC
9754 else if (bp->fw_health && !bp->fw_health->status_reliable)
9755 bnxt_try_map_fw_health_reg(bp);
ec5d31e3 9756
3bc7d4a3
MC
9757 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9758 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
20d7d1c5 9759 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
3bc7d4a3
MC
9760 return -ENODEV;
9761 }
ec5d31e3
MC
9762 if (resc_reinit || fw_reset) {
9763 if (fw_reset) {
2924ad95 9764 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
f3a6d206
VV
9765 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9766 bnxt_ulp_stop(bp);
325f85f3
MC
9767 bnxt_free_ctx_mem(bp);
9768 kfree(bp->ctx);
9769 bp->ctx = NULL;
843d699d 9770 bnxt_dcb_free(bp);
ec5d31e3
MC
9771 rc = bnxt_fw_init_one(bp);
9772 if (rc) {
2924ad95 9773 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
ec5d31e3
MC
9774 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9775 return rc;
9776 }
9777 bnxt_clear_int_mode(bp);
9778 rc = bnxt_init_int_mode(bp);
9779 if (rc) {
2924ad95 9780 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
ec5d31e3
MC
9781 netdev_err(bp->dev, "init int mode failed\n");
9782 return rc;
9783 }
ec5d31e3
MC
9784 }
9785 if (BNXT_NEW_RM(bp)) {
9786 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9787
9788 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
15a7deb8
SB
9789 if (rc)
9790 netdev_err(bp->dev, "resc_qcaps failed\n");
9791
ec5d31e3
MC
9792 hw_resc->resv_cp_rings = 0;
9793 hw_resc->resv_stat_ctxs = 0;
9794 hw_resc->resv_irqs = 0;
9795 hw_resc->resv_tx_rings = 0;
9796 hw_resc->resv_rx_rings = 0;
9797 hw_resc->resv_hw_ring_grps = 0;
9798 hw_resc->resv_vnics = 0;
9799 if (!fw_reset) {
9800 bp->tx_nr_rings = 0;
9801 bp->rx_nr_rings = 0;
9802 }
9803 }
25e1acd6 9804 }
15a7deb8 9805 return rc;
25e1acd6
MC
9806}
9807
5ad2cbee
MC
9808static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9809{
bbf33d1d
EP
9810 struct hwrm_port_led_qcaps_output *resp;
9811 struct hwrm_port_led_qcaps_input *req;
5ad2cbee
MC
9812 struct bnxt_pf_info *pf = &bp->pf;
9813 int rc;
9814
ba642ab7 9815 bp->num_leds = 0;
5ad2cbee
MC
9816 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9817 return 0;
9818
bbf33d1d
EP
9819 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
9820 if (rc)
9821 return rc;
9822
9823 req->port_id = cpu_to_le16(pf->port_id);
9824 resp = hwrm_req_hold(bp, req);
9825 rc = hwrm_req_send(bp, req);
5ad2cbee 9826 if (rc) {
bbf33d1d 9827 hwrm_req_drop(bp, req);
5ad2cbee
MC
9828 return rc;
9829 }
9830 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9831 int i;
9832
9833 bp->num_leds = resp->num_leds;
9834 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9835 bp->num_leds);
9836 for (i = 0; i < bp->num_leds; i++) {
9837 struct bnxt_led_info *led = &bp->leds[i];
9838 __le16 caps = led->led_state_caps;
9839
9840 if (!led->led_group_id ||
9841 !BNXT_LED_ALT_BLINK_CAP(caps)) {
9842 bp->num_leds = 0;
9843 break;
9844 }
9845 }
9846 }
bbf33d1d 9847 hwrm_req_drop(bp, req);
5ad2cbee
MC
9848 return 0;
9849}
9850
5282db6c
MC
9851int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9852{
bbf33d1d
EP
9853 struct hwrm_wol_filter_alloc_output *resp;
9854 struct hwrm_wol_filter_alloc_input *req;
5282db6c
MC
9855 int rc;
9856
bbf33d1d
EP
9857 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
9858 if (rc)
9859 return rc;
9860
9861 req->port_id = cpu_to_le16(bp->pf.port_id);
9862 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9863 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9864 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
9865
9866 resp = hwrm_req_hold(bp, req);
9867 rc = hwrm_req_send(bp, req);
5282db6c
MC
9868 if (!rc)
9869 bp->wol_filter_id = resp->wol_filter_id;
bbf33d1d 9870 hwrm_req_drop(bp, req);
5282db6c
MC
9871 return rc;
9872}
9873
9874int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9875{
bbf33d1d
EP
9876 struct hwrm_wol_filter_free_input *req;
9877 int rc;
5282db6c 9878
bbf33d1d
EP
9879 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
9880 if (rc)
9881 return rc;
9882
9883 req->port_id = cpu_to_le16(bp->pf.port_id);
9884 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9885 req->wol_filter_id = bp->wol_filter_id;
9886
9887 return hwrm_req_send(bp, req);
5282db6c
MC
9888}
9889
c1ef146a
MC
9890static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9891{
bbf33d1d
EP
9892 struct hwrm_wol_filter_qcfg_output *resp;
9893 struct hwrm_wol_filter_qcfg_input *req;
c1ef146a
MC
9894 u16 next_handle = 0;
9895 int rc;
9896
bbf33d1d
EP
9897 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
9898 if (rc)
9899 return rc;
9900
9901 req->port_id = cpu_to_le16(bp->pf.port_id);
9902 req->handle = cpu_to_le16(handle);
9903 resp = hwrm_req_hold(bp, req);
9904 rc = hwrm_req_send(bp, req);
c1ef146a
MC
9905 if (!rc) {
9906 next_handle = le16_to_cpu(resp->next_handle);
9907 if (next_handle != 0) {
9908 if (resp->wol_type ==
9909 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9910 bp->wol = 1;
9911 bp->wol_filter_id = resp->wol_filter_id;
9912 }
9913 }
9914 }
bbf33d1d 9915 hwrm_req_drop(bp, req);
c1ef146a
MC
9916 return next_handle;
9917}
9918
9919static void bnxt_get_wol_settings(struct bnxt *bp)
9920{
9921 u16 handle = 0;
9922
ba642ab7 9923 bp->wol = 0;
c1ef146a
MC
9924 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9925 return;
9926
9927 do {
9928 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9929 } while (handle && handle != 0xffff);
9930}
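/* Editor's note (descriptive): the firmware reports WoL filters as a chain
 * of handles; each bnxt_hwrm_get_wol_fltrs() call returns the next handle,
 * so the loop above keeps querying until the firmware returns 0 or the
 * 0xffff value that appears to serve as an end-of-list marker.
 */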
9931
cde49a42
VV
9932#ifdef CONFIG_BNXT_HWMON
9933static ssize_t bnxt_show_temp(struct device *dev,
9934 struct device_attribute *devattr, char *buf)
9935{
cde49a42 9936 struct hwrm_temp_monitor_query_output *resp;
bbf33d1d 9937 struct hwrm_temp_monitor_query_input *req;
cde49a42 9938 struct bnxt *bp = dev_get_drvdata(dev);
12cce90b 9939 u32 len = 0;
d69753fa 9940 int rc;
cde49a42 9941
bbf33d1d
EP
9942 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
9943 if (rc)
9944 return rc;
9945 resp = hwrm_req_hold(bp, req);
9946 rc = hwrm_req_send(bp, req);
d69753fa 9947 if (!rc)
12cce90b 9948 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
bbf33d1d 9949 hwrm_req_drop(bp, req);
27537929
DC
9950 if (rc)
9951 return rc;
9952 return len;
cde49a42
VV
9953}
9954static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9955
9956static struct attribute *bnxt_attrs[] = {
9957 &sensor_dev_attr_temp1_input.dev_attr.attr,
9958 NULL
9959};
9960ATTRIBUTE_GROUPS(bnxt);
9961
9962static void bnxt_hwmon_close(struct bnxt *bp)
9963{
9964 if (bp->hwmon_dev) {
9965 hwmon_device_unregister(bp->hwmon_dev);
9966 bp->hwmon_dev = NULL;
9967 }
9968}
9969
9970static void bnxt_hwmon_open(struct bnxt *bp)
9971{
bbf33d1d 9972 struct hwrm_temp_monitor_query_input *req;
cde49a42 9973 struct pci_dev *pdev = bp->pdev;
d69753fa
EP
9974 int rc;
9975
bbf33d1d
EP
9976 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
9977 if (!rc)
9978 rc = hwrm_req_send_silent(bp, req);
d69753fa
EP
9979 if (rc == -EACCES || rc == -EOPNOTSUPP) {
9980 bnxt_hwmon_close(bp);
9981 return;
9982 }
cde49a42 9983
ba642ab7
MC
9984 if (bp->hwmon_dev)
9985 return;
9986
cde49a42
VV
9987 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9988 DRV_MODULE_NAME, bp,
9989 bnxt_groups);
9990 if (IS_ERR(bp->hwmon_dev)) {
9991 bp->hwmon_dev = NULL;
9992 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9993 }
9994}
9995#else
9996static void bnxt_hwmon_close(struct bnxt *bp)
9997{
9998}
9999
10000static void bnxt_hwmon_open(struct bnxt *bp)
10001{
10002}
10003#endif
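/* Editor's usage note (illustrative): once bnxt_hwmon_open() registers the
 * device, the sensor is visible as /sys/class/hwmon/hwmon<N>/temp1_input
 * and reads back the ASIC temperature in millidegrees Celsius, e.g.
 * "55000" for 55 C, matching the resp->temp * 1000 conversion above.
 */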
10004
939f7f0c
MC
10005static bool bnxt_eee_config_ok(struct bnxt *bp)
10006{
10007 struct ethtool_eee *eee = &bp->eee;
10008 struct bnxt_link_info *link_info = &bp->link_info;
10009
b0d28207 10010 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
939f7f0c
MC
10011 return true;
10012
10013 if (eee->eee_enabled) {
10014 u32 advertising =
10015 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10016
10017 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10018 eee->eee_enabled = 0;
10019 return false;
10020 }
10021 if (eee->advertised & ~advertising) {
10022 eee->advertised = advertising & eee->supported;
10023 return false;
10024 }
10025 }
10026 return true;
10027}
10028
c0c050c5
MC
10029static int bnxt_update_phy_setting(struct bnxt *bp)
10030{
10031 int rc;
10032 bool update_link = false;
10033 bool update_pause = false;
939f7f0c 10034 bool update_eee = false;
c0c050c5
MC
10035 struct bnxt_link_info *link_info = &bp->link_info;
10036
10037 rc = bnxt_update_link(bp, true);
10038 if (rc) {
10039 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10040 rc);
10041 return rc;
10042 }
33dac24a
MC
10043 if (!BNXT_SINGLE_PF(bp))
10044 return 0;
10045
c0c050c5 10046 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
c9ee9516
MC
10047 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10048 link_info->req_flow_ctrl)
c0c050c5
MC
10049 update_pause = true;
10050 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10051 link_info->force_pause_setting != link_info->req_flow_ctrl)
10052 update_pause = true;
c0c050c5
MC
10053 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10054 if (BNXT_AUTO_MODE(link_info->auto_mode))
10055 update_link = true;
d058426e
EP
10056 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10057 link_info->req_link_speed != link_info->force_link_speed)
10058 update_link = true;
10059 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10060 link_info->req_link_speed != link_info->force_pam4_link_speed)
c0c050c5 10061 update_link = true;
de73018f
MC
10062 if (link_info->req_duplex != link_info->duplex_setting)
10063 update_link = true;
c0c050c5
MC
10064 } else {
10065 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10066 update_link = true;
d058426e
EP
10067 if (link_info->advertising != link_info->auto_link_speeds ||
10068 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
c0c050c5 10069 update_link = true;
c0c050c5
MC
10070 }
10071
16d663a6
MC
10072	/* The last close may have shut down the link, so we need to call
10073 * PHY_CFG to bring it back up.
10074 */
83d8f5e9 10075 if (!bp->link_info.link_up)
16d663a6
MC
10076 update_link = true;
10077
939f7f0c
MC
10078 if (!bnxt_eee_config_ok(bp))
10079 update_eee = true;
10080
c0c050c5 10081 if (update_link)
939f7f0c 10082 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
c0c050c5
MC
10083 else if (update_pause)
10084 rc = bnxt_hwrm_set_pause(bp);
10085 if (rc) {
10086 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10087 rc);
10088 return rc;
10089 }
10090
10091 return rc;
10092}
10093
11809490
JH
10094/* Common routine to pre-map certain register block to different GRC window.
10095 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10096 * in the PF and 3 windows in the VF can be customized to map in different
10097 * register blocks.
10098 */
10099static void bnxt_preset_reg_win(struct bnxt *bp)
10100{
10101 if (BNXT_PF(bp)) {
10102 /* CAG registers map to GRC window #4 */
10103 writel(BNXT_CAG_REG_BASE,
10104 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10105 }
10106}
10107
47558acd
MC
10108static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10109
6882c36c
EP
10110static int bnxt_reinit_after_abort(struct bnxt *bp)
10111{
10112 int rc;
10113
10114 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10115 return -EBUSY;
10116
d20cd745
VV
10117 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10118 return -ENODEV;
10119
6882c36c
EP
10120 rc = bnxt_fw_init_one(bp);
10121 if (!rc) {
10122 bnxt_clear_int_mode(bp);
10123 rc = bnxt_init_int_mode(bp);
10124 if (!rc) {
10125 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10126 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10127 }
10128 }
10129 return rc;
10130}
10131
c0c050c5
MC
10132static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10133{
10134 int rc = 0;
10135
11809490 10136 bnxt_preset_reg_win(bp);
c0c050c5
MC
10137 netif_carrier_off(bp->dev);
10138 if (irq_re_init) {
47558acd
MC
10139 /* Reserve rings now if none were reserved at driver probe. */
10140 rc = bnxt_init_dflt_ring_mode(bp);
10141 if (rc) {
10142 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10143 return rc;
10144 }
c0c050c5 10145 }
1b3f0b75 10146 rc = bnxt_reserve_rings(bp, irq_re_init);
41e8d798
MC
10147 if (rc)
10148 return rc;
c0c050c5
MC
10149 if ((bp->flags & BNXT_FLAG_RFS) &&
10150 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10151 /* disable RFS if falling back to INTA */
10152 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10153 bp->flags &= ~BNXT_FLAG_RFS;
10154 }
10155
10156 rc = bnxt_alloc_mem(bp, irq_re_init);
10157 if (rc) {
10158 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10159 goto open_err_free_mem;
10160 }
10161
10162 if (irq_re_init) {
10163 bnxt_init_napi(bp);
10164 rc = bnxt_request_irq(bp);
10165 if (rc) {
10166 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
c58387ab 10167 goto open_err_irq;
c0c050c5
MC
10168 }
10169 }
10170
c0c050c5
MC
10171 rc = bnxt_init_nic(bp, irq_re_init);
10172 if (rc) {
10173 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
96ecdcc9 10174 goto open_err_irq;
c0c050c5
MC
10175 }
10176
96ecdcc9
JK
10177 bnxt_enable_napi(bp);
10178 bnxt_debug_dev_init(bp);
10179
c0c050c5 10180 if (link_re_init) {
e2dc9b6e 10181 mutex_lock(&bp->link_lock);
c0c050c5 10182 rc = bnxt_update_phy_setting(bp);
e2dc9b6e 10183 mutex_unlock(&bp->link_lock);
a1ef4a79 10184 if (rc) {
ba41d46f 10185 netdev_warn(bp->dev, "failed to update phy settings\n");
a1ef4a79
MC
10186 if (BNXT_SINGLE_PF(bp)) {
10187 bp->link_info.phy_retry = true;
10188 bp->link_info.phy_retry_expires =
10189 jiffies + 5 * HZ;
10190 }
10191 }
c0c050c5
MC
10192 }
10193
7cdd5fc3 10194 if (irq_re_init)
442a35a5 10195 udp_tunnel_nic_reset_ntf(bp->dev);
c0c050c5 10196
caefe526 10197 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
10198 bnxt_enable_int(bp);
10199 /* Enable TX queues */
10200 bnxt_tx_enable(bp);
10201 mod_timer(&bp->timer, jiffies + bp->current_interval);
10289bec 10202 /* Poll link status and check for SFP+ module status */
3c10ed49 10203 mutex_lock(&bp->link_lock);
10289bec 10204 bnxt_get_port_module_status(bp);
3c10ed49 10205 mutex_unlock(&bp->link_lock);
c0c050c5 10206
ee5c7fb3
SP
10207 /* VF-reps may need to be re-opened after the PF is re-opened */
10208 if (BNXT_PF(bp))
10209 bnxt_vf_reps_open(bp);
c0c050c5
MC
10210 return 0;
10211
c58387ab 10212open_err_irq:
c0c050c5
MC
10213 bnxt_del_napi(bp);
10214
10215open_err_free_mem:
10216 bnxt_free_skbs(bp);
10217 bnxt_free_irq(bp);
10218 bnxt_free_mem(bp, true);
10219 return rc;
10220}
10221
10222/* rtnl_lock held */
10223int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10224{
10225 int rc = 0;
10226
a1301f08
MC
10227 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10228 rc = -EIO;
10229 if (!rc)
10230 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
10231 if (rc) {
10232 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10233 dev_close(bp->dev);
10234 }
10235 return rc;
10236}
10237
f7dc1ea6
MC
10238/* rtnl_lock held, open the NIC half way by allocating all resources, but
10239 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
10240 * self tests.
10241 */
10242int bnxt_half_open_nic(struct bnxt *bp)
10243{
10244 int rc = 0;
10245
11a39259
SK
10246 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10247 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10248 rc = -ENODEV;
10249 goto half_open_err;
10250 }
10251
f7dc1ea6
MC
10252 rc = bnxt_alloc_mem(bp, false);
10253 if (rc) {
10254 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10255 goto half_open_err;
10256 }
10257 rc = bnxt_init_nic(bp, false);
10258 if (rc) {
10259 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10260 goto half_open_err;
10261 }
10262 return 0;
10263
10264half_open_err:
10265 bnxt_free_skbs(bp);
10266 bnxt_free_mem(bp, false);
10267 dev_close(bp->dev);
10268 return rc;
10269}
10270
10271/* rtnl_lock held, this call can only be made after a previous successful
10272 * call to bnxt_half_open_nic().
10273 */
10274void bnxt_half_close_nic(struct bnxt *bp)
10275{
10276 bnxt_hwrm_resource_free(bp, false, false);
10277 bnxt_free_skbs(bp);
10278 bnxt_free_mem(bp, false);
10279}
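/* Editor's sketch (illustrative only, rtnl_lock assumed held): the half
 * open/close pair is meant to bracket offline self tests, roughly:
 *
 *	bnxt_close_nic(bp, true, false);
 *	rc = bnxt_half_open_nic(bp);
 *	if (!rc) {
 *		// run loopback / offline tests here
 *		bnxt_half_close_nic(bp);
 *	}
 *	bnxt_open_nic(bp, true, false);
 *
 * which is roughly how the ethtool self-test path is expected to use it.
 */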
10280
c16d4ee0
MC
10281static void bnxt_reenable_sriov(struct bnxt *bp)
10282{
10283 if (BNXT_PF(bp)) {
10284 struct bnxt_pf_info *pf = &bp->pf;
10285 int n = pf->active_vfs;
10286
10287 if (n)
10288 bnxt_cfg_hw_sriov(bp, &n, true);
10289 }
10290}
10291
c0c050c5
MC
10292static int bnxt_open(struct net_device *dev)
10293{
10294 struct bnxt *bp = netdev_priv(dev);
25e1acd6 10295 int rc;
c0c050c5 10296
ec5d31e3 10297 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
6882c36c
EP
10298 rc = bnxt_reinit_after_abort(bp);
10299 if (rc) {
10300 if (rc == -EBUSY)
10301 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10302 else
10303 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10304 return -ENODEV;
10305 }
ec5d31e3
MC
10306 }
10307
10308 rc = bnxt_hwrm_if_change(bp, true);
25e1acd6 10309 if (rc)
ec5d31e3 10310 return rc;
d7859afb 10311
ec5d31e3
MC
10312 rc = __bnxt_open_nic(bp, true, true);
10313 if (rc) {
25e1acd6 10314 bnxt_hwrm_if_change(bp, false);
ec5d31e3 10315 } else {
f3a6d206 10316 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
12de2ead 10317 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
f3a6d206 10318 bnxt_ulp_start(bp, 0);
12de2ead
MC
10319 bnxt_reenable_sriov(bp);
10320 }
ec5d31e3
MC
10321 }
10322 bnxt_hwmon_open(bp);
10323 }
cde49a42 10324
25e1acd6 10325 return rc;
c0c050c5
MC
10326}
10327
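/* True while the slow-path task or a stats read is still touching the
 * device; __bnxt_close_nic() polls this before tearing the rings down.
 */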
f9b76ebd
MC
10328static bool bnxt_drv_busy(struct bnxt *bp)
10329{
10330 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10331 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10332}
10333
b8875ca3
MC
10334static void bnxt_get_ring_stats(struct bnxt *bp,
10335 struct rtnl_link_stats64 *stats);
10336
86e953db
MC
10337static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10338 bool link_re_init)
c0c050c5 10339{
ee5c7fb3
SP
10340 /* Close the VF-reps before closing PF */
10341 if (BNXT_PF(bp))
10342 bnxt_vf_reps_close(bp);
86e953db 10343
c0c050c5
MC
10344 /* Change device state to avoid TX queue wake-ups */
10345 bnxt_tx_disable(bp);
10346
caefe526 10347 clear_bit(BNXT_STATE_OPEN, &bp->state);
4cebdcec 10348 smp_mb__after_atomic();
f9b76ebd 10349 while (bnxt_drv_busy(bp))
4cebdcec 10350 msleep(20);
c0c050c5 10351
9d8bc097 10352 /* Flush rings and disable interrupts */
c0c050c5
MC
10353 bnxt_shutdown_nic(bp, irq_re_init);
10354
10355 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10356
cabfb09d 10357 bnxt_debug_dev_exit(bp);
c0c050c5 10358 bnxt_disable_napi(bp);
c0c050c5
MC
10359 del_timer_sync(&bp->timer);
10360 bnxt_free_skbs(bp);
10361
b8875ca3 10362 /* Save ring stats before shutdown */
b8056e84 10363 if (bp->bnapi && irq_re_init)
b8875ca3 10364 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
c0c050c5
MC
10365 if (irq_re_init) {
10366 bnxt_free_irq(bp);
10367 bnxt_del_napi(bp);
10368 }
10369 bnxt_free_mem(bp, irq_re_init);
86e953db
MC
10370}
10371
10372int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10373{
10374 int rc = 0;
10375
3bc7d4a3
MC
10376 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10377 /* If we get here, it means firmware reset is in progress
10378 * while we are trying to close. We can safely proceed with
10379 * the close because we are holding rtnl_lock(). Some firmware
10380 * messages may fail as we proceed to close. We set the
10381 * ABORT_ERR flag here so that the FW reset thread will later
10382 * abort when it gets the rtnl_lock() and sees the flag.
10383 */
10384 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10385 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10386 }
10387
86e953db
MC
10388#ifdef CONFIG_BNXT_SRIOV
10389 if (bp->sriov_cfg) {
10390 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10391 !bp->sriov_cfg,
10392 BNXT_SRIOV_CFG_WAIT_TMO);
10393 if (rc)
10394 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10395 }
10396#endif
10397 __bnxt_close_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
10398 return rc;
10399}
10400
10401static int bnxt_close(struct net_device *dev)
10402{
10403 struct bnxt *bp = netdev_priv(dev);
10404
cde49a42 10405 bnxt_hwmon_close(bp);
c0c050c5 10406 bnxt_close_nic(bp, true, true);
33f7d55f 10407 bnxt_hwrm_shutdown_link(bp);
25e1acd6 10408 bnxt_hwrm_if_change(bp, false);
c0c050c5
MC
10409 return 0;
10410}
10411
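/* MDIO accesses are tunneled through firmware: build a
 * HWRM_PORT_PHY_MDIO_READ request, hold the response across
 * hwrm_req_send(), copy out reg_data and drop the request.  Clause-45
 * addressing is used when phy_addr encodes a C45 prtad/devad pair.
 */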
0ca12be9
VV
10412static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10413 u16 *val)
10414{
bbf33d1d
EP
10415 struct hwrm_port_phy_mdio_read_output *resp;
10416 struct hwrm_port_phy_mdio_read_input *req;
0ca12be9
VV
10417 int rc;
10418
10419 if (bp->hwrm_spec_code < 0x10a00)
10420 return -EOPNOTSUPP;
10421
bbf33d1d
EP
10422 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10423 if (rc)
10424 return rc;
10425
10426 req->port_id = cpu_to_le16(bp->pf.port_id);
10427 req->phy_addr = phy_addr;
10428 req->reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 10429 if (mdio_phy_id_is_c45(phy_addr)) {
bbf33d1d
EP
10430 req->cl45_mdio = 1;
10431 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10432 req->dev_addr = mdio_phy_id_devad(phy_addr);
10433 req->reg_addr = cpu_to_le16(reg);
0ca12be9
VV
10434 }
10435
bbf33d1d
EP
10436 resp = hwrm_req_hold(bp, req);
10437 rc = hwrm_req_send(bp, req);
0ca12be9
VV
10438 if (!rc)
10439 *val = le16_to_cpu(resp->reg_data);
bbf33d1d 10440 hwrm_req_drop(bp, req);
0ca12be9
VV
10441 return rc;
10442}
10443
10444static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10445 u16 val)
10446{
bbf33d1d
EP
10447 struct hwrm_port_phy_mdio_write_input *req;
10448 int rc;
0ca12be9
VV
10449
10450 if (bp->hwrm_spec_code < 0x10a00)
10451 return -EOPNOTSUPP;
10452
bbf33d1d
EP
10453 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10454 if (rc)
10455 return rc;
10456
10457 req->port_id = cpu_to_le16(bp->pf.port_id);
10458 req->phy_addr = phy_addr;
10459 req->reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 10460 if (mdio_phy_id_is_c45(phy_addr)) {
bbf33d1d
EP
10461 req->cl45_mdio = 1;
10462 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10463 req->dev_addr = mdio_phy_id_devad(phy_addr);
10464 req->reg_addr = cpu_to_le16(reg);
0ca12be9 10465 }
bbf33d1d 10466 req->reg_data = cpu_to_le16(val);
0ca12be9 10467
bbf33d1d 10468 return hwrm_req_send(bp, req);
0ca12be9
VV
10469}
10470
c0c050c5
MC
10471/* rtnl_lock held */
10472static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10473{
0ca12be9
VV
10474 struct mii_ioctl_data *mdio = if_mii(ifr);
10475 struct bnxt *bp = netdev_priv(dev);
10476 int rc;
10477
c0c050c5
MC
10478 switch (cmd) {
10479 case SIOCGMIIPHY:
0ca12be9
VV
10480 mdio->phy_id = bp->link_info.phy_addr;
10481
df561f66 10482 fallthrough;
c0c050c5 10483 case SIOCGMIIREG: {
0ca12be9
VV
10484 u16 mii_regval = 0;
10485
c0c050c5
MC
10486 if (!netif_running(dev))
10487 return -EAGAIN;
10488
0ca12be9
VV
10489 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10490 &mii_regval);
10491 mdio->val_out = mii_regval;
10492 return rc;
c0c050c5
MC
10493 }
10494
10495 case SIOCSMIIREG:
10496 if (!netif_running(dev))
10497 return -EAGAIN;
10498
0ca12be9
VV
10499 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10500 mdio->val_in);
c0c050c5 10501
118612d5
MC
10502 case SIOCSHWTSTAMP:
10503 return bnxt_hwtstamp_set(dev, ifr);
10504
10505 case SIOCGHWTSTAMP:
10506 return bnxt_hwtstamp_get(dev, ifr);
10507
c0c050c5
MC
10508 default:
10509 /* do nothing */
10510 break;
10511 }
10512 return -EOPNOTSUPP;
10513}
10514
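/* Sum the per-completion-ring software counters into a single
 * rtnl_link_stats64 snapshot.
 */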
b8875ca3
MC
10515static void bnxt_get_ring_stats(struct bnxt *bp,
10516 struct rtnl_link_stats64 *stats)
c0c050c5 10517{
b8875ca3 10518 int i;
c0c050c5 10519
c0c050c5
MC
10520 for (i = 0; i < bp->cp_nr_rings; i++) {
10521 struct bnxt_napi *bnapi = bp->bnapi[i];
10522 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
a0c30621 10523 u64 *sw = cpr->stats.sw_stats;
c0c050c5 10524
a0c30621
MC
10525 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10526 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10527 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
c0c050c5 10528
a0c30621
MC
10529 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10530 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10531 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
c0c050c5 10532
a0c30621
MC
10533 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10534 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10535 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
c0c050c5 10536
a0c30621
MC
10537 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10538 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10539 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
c0c050c5
MC
10540
10541 stats->rx_missed_errors +=
a0c30621 10542 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
c0c050c5 10543
a0c30621 10544 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
c0c050c5 10545
a0c30621 10546 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
40bedf7c 10547
907fd4a2
JK
10548 stats->rx_dropped +=
10549 cpr->sw_stats.rx.rx_netpoll_discards +
10550 cpr->sw_stats.rx.rx_oom_discards;
c0c050c5 10551 }
b8875ca3
MC
10552}
10553
10554static void bnxt_add_prev_stats(struct bnxt *bp,
10555 struct rtnl_link_stats64 *stats)
10556{
10557 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10558
10559 stats->rx_packets += prev_stats->rx_packets;
10560 stats->tx_packets += prev_stats->tx_packets;
10561 stats->rx_bytes += prev_stats->rx_bytes;
10562 stats->tx_bytes += prev_stats->tx_bytes;
10563 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10564 stats->multicast += prev_stats->multicast;
40bedf7c 10565 stats->rx_dropped += prev_stats->rx_dropped;
b8875ca3
MC
10566 stats->tx_dropped += prev_stats->tx_dropped;
10567}
10568
10569static void
10570bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10571{
10572 struct bnxt *bp = netdev_priv(dev);
10573
10574 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10575 /* Make sure bnxt_close_nic() sees that we are reading stats before
10576 * we check the BNXT_STATE_OPEN flag.
10577 */
10578 smp_mb__after_atomic();
10579 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10580 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10581 *stats = bp->net_stats_prev;
10582 return;
10583 }
10584
10585 bnxt_get_ring_stats(bp, stats);
10586 bnxt_add_prev_stats(bp, stats);
c0c050c5 10587
9947f83f 10588 if (bp->flags & BNXT_FLAG_PORT_STATS) {
a0c30621
MC
10589 u64 *rx = bp->port_stats.sw_stats;
10590 u64 *tx = bp->port_stats.sw_stats +
10591 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10592
10593 stats->rx_crc_errors =
10594 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10595 stats->rx_frame_errors =
10596 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10597 stats->rx_length_errors =
10598 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10599 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10600 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10601 stats->rx_errors =
10602 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10603 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10604 stats->collisions =
10605 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10606 stats->tx_fifo_errors =
10607 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10608 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
9947f83f 10609 }
f9b76ebd 10610 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
c0c050c5
MC
10611}
10612
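/* Copy the netdev multicast list into the default VNIC and report whether
 * the rx mask needs updating; overflowing BNXT_MAX_MC_ADDRS falls back to
 * ALL_MCAST mode.
 */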
10613static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10614{
10615 struct net_device *dev = bp->dev;
10616 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10617 struct netdev_hw_addr *ha;
10618 u8 *haddr;
10619 int mc_count = 0;
10620 bool update = false;
10621 int off = 0;
10622
10623 netdev_for_each_mc_addr(ha, dev) {
10624 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10625 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10626 vnic->mc_list_count = 0;
10627 return false;
10628 }
10629 haddr = ha->addr;
10630 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10631 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10632 update = true;
10633 }
10634 off += ETH_ALEN;
10635 mc_count++;
10636 }
10637 if (mc_count)
10638 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10639
10640 if (mc_count != vnic->mc_list_count) {
10641 vnic->mc_list_count = mc_count;
10642 update = true;
10643 }
10644 return update;
10645}
10646
10647static bool bnxt_uc_list_updated(struct bnxt *bp)
10648{
10649 struct net_device *dev = bp->dev;
10650 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10651 struct netdev_hw_addr *ha;
10652 int off = 0;
10653
10654 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10655 return true;
10656
10657 netdev_for_each_uc_addr(ha, dev) {
10658 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10659 return true;
10660
10661 off += ETH_ALEN;
10662 }
10663 return false;
10664}
10665
10666static void bnxt_set_rx_mode(struct net_device *dev)
10667{
10668 struct bnxt *bp = netdev_priv(dev);
268d0895 10669 struct bnxt_vnic_info *vnic;
c0c050c5
MC
10670 bool mc_update = false;
10671 bool uc_update;
268d0895 10672 u32 mask;
c0c050c5 10673
268d0895 10674 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
10675 return;
10676
268d0895
MC
10677 vnic = &bp->vnic_info[0];
10678 mask = vnic->rx_mask;
c0c050c5
MC
10679 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10680 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
30e33848
MC
10681 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10682 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
c0c050c5 10683
dd85fc0a 10684 if (dev->flags & IFF_PROMISC)
c0c050c5
MC
10685 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10686
10687 uc_update = bnxt_uc_list_updated(bp);
10688
30e33848
MC
10689 if (dev->flags & IFF_BROADCAST)
10690 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5
MC
10691 if (dev->flags & IFF_ALLMULTI) {
10692 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10693 vnic->mc_list_count = 0;
10694 } else {
10695 mc_update = bnxt_mc_list_updated(bp, &mask);
10696 }
10697
10698 if (mask != vnic->rx_mask || uc_update || mc_update) {
10699 vnic->rx_mask = mask;
10700
10701 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
c213eae8 10702 bnxt_queue_sp_work(bp);
c0c050c5
MC
10703 }
10704}
10705
b664f008 10706static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
10707{
10708 struct net_device *dev = bp->dev;
10709 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
bbf33d1d 10710 struct hwrm_cfa_l2_filter_free_input *req;
c0c050c5
MC
10711 struct netdev_hw_addr *ha;
10712 int i, off = 0, rc;
10713 bool uc_update;
10714
10715 netif_addr_lock_bh(dev);
10716 uc_update = bnxt_uc_list_updated(bp);
10717 netif_addr_unlock_bh(dev);
10718
10719 if (!uc_update)
10720 goto skip_uc;
10721
bbf33d1d
EP
10722 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
10723 if (rc)
10724 return rc;
10725 hwrm_req_hold(bp, req);
c0c050c5 10726 for (i = 1; i < vnic->uc_filter_count; i++) {
bbf33d1d 10727 req->l2_filter_id = vnic->fw_l2_filter_id[i];
c0c050c5 10728
bbf33d1d 10729 rc = hwrm_req_send(bp, req);
c0c050c5 10730 }
bbf33d1d 10731 hwrm_req_drop(bp, req);
c0c050c5
MC
10732
10733 vnic->uc_filter_count = 1;
10734
10735 netif_addr_lock_bh(dev);
10736 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10737 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10738 } else {
10739 netdev_for_each_uc_addr(ha, dev) {
10740 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10741 off += ETH_ALEN;
10742 vnic->uc_filter_count++;
10743 }
10744 }
10745 netif_addr_unlock_bh(dev);
10746
10747 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10748 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10749 if (rc) {
10750 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10751 rc);
10752 vnic->uc_filter_count = i;
b664f008 10753 return rc;
c0c050c5
MC
10754 }
10755 }
10756
10757skip_uc:
dd85fc0a
EP
10758 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10759 !bnxt_promisc_ok(bp))
10760 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
c0c050c5 10761 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
b4e30e8e
MC
10762 if (rc && vnic->mc_list_count) {
10763 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10764 rc);
10765 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10766 vnic->mc_list_count = 0;
10767 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10768 }
c0c050c5 10769 if (rc)
b4e30e8e 10770 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
c0c050c5 10771 rc);
b664f008
MC
10772
10773 return rc;
c0c050c5
MC
10774}
10775
2773dfb2
MC
10776static bool bnxt_can_reserve_rings(struct bnxt *bp)
10777{
10778#ifdef CONFIG_BNXT_SRIOV
f1ca94de 10779 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
2773dfb2
MC
10780 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10781
10782 /* No minimum rings were provisioned by the PF. Don't
10783 * reserve rings by default when device is down.
10784 */
10785 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10786 return true;
10787
10788 if (!netif_running(bp->dev))
10789 return false;
10790 }
10791#endif
10792 return true;
10793}
10794
8079e8f1
MC
10795/* If the chip and firmware support RFS */
10796static bool bnxt_rfs_supported(struct bnxt *bp)
10797{
e969ae5b 10798 if (bp->flags & BNXT_FLAG_CHIP_P5) {
41136ab3 10799 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
e969ae5b 10800 return true;
41e8d798 10801 return false;
e969ae5b 10802 }
976e52b7
MC
10803 /* 212 firmware is broken for aRFS */
10804 if (BNXT_FW_MAJ(bp) == 212)
10805 return false;
8079e8f1
MC
10806 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10807 return true;
ae10ae74
MC
10808 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10809 return true;
8079e8f1
MC
10810 return false;
10811}
10812
10813/* If runtime conditions support RFS */
2bcfa6f6
MC
10814static bool bnxt_rfs_capable(struct bnxt *bp)
10815{
10816#ifdef CONFIG_RFS_ACCEL
8079e8f1 10817 int vnics, max_vnics, max_rss_ctxs;
2bcfa6f6 10818
41e8d798 10819 if (bp->flags & BNXT_FLAG_CHIP_P5)
ac33906c 10820 return bnxt_rfs_supported(bp);
2773dfb2 10821 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
2bcfa6f6
MC
10822 return false;
10823
10824 vnics = 1 + bp->rx_nr_rings;
8079e8f1
MC
10825 max_vnics = bnxt_get_max_func_vnics(bp);
10826 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
ae10ae74
MC
10827
10828 /* RSS contexts not a limiting factor */
10829 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10830 max_rss_ctxs = max_vnics;
8079e8f1 10831 if (vnics > max_vnics || vnics > max_rss_ctxs) {
6a1eef5b
MC
10832 if (bp->rx_nr_rings > 1)
10833 netdev_warn(bp->dev,
10834 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10835 min(max_rss_ctxs - 1, max_vnics - 1));
2bcfa6f6 10836 return false;
a2304909 10837 }
2bcfa6f6 10838
f1ca94de 10839 if (!BNXT_NEW_RM(bp))
6a1eef5b
MC
10840 return true;
10841
10842 if (vnics == bp->hw_resc.resv_vnics)
10843 return true;
10844
780baad4 10845 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
6a1eef5b
MC
10846 if (vnics <= bp->hw_resc.resv_vnics)
10847 return true;
10848
10849 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
780baad4 10850 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
6a1eef5b 10851 return false;
2bcfa6f6
MC
10852#else
10853 return false;
10854#endif
10855}
10856
c0c050c5
MC
10857static netdev_features_t bnxt_fix_features(struct net_device *dev,
10858 netdev_features_t features)
10859{
2bcfa6f6 10860 struct bnxt *bp = netdev_priv(dev);
c72cb303 10861 netdev_features_t vlan_features;
2bcfa6f6 10862
a2304909 10863 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
2bcfa6f6 10864 features &= ~NETIF_F_NTUPLE;
5a9f6b23 10865
1054aee8
MC
10866 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10867 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10868
10869 if (!(features & NETIF_F_GRO))
10870 features &= ~NETIF_F_GRO_HW;
10871
10872 if (features & NETIF_F_GRO_HW)
10873 features &= ~NETIF_F_LRO;
10874
5a9f6b23
MC
10875 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10876 * turned on or off together.
10877 */
a196e96b
EP
10878 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10879 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10880 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10881 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
c72cb303 10882 else if (vlan_features)
a196e96b 10883 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
5a9f6b23 10884 }
cf6645f8 10885#ifdef CONFIG_BNXT_SRIOV
a196e96b
EP
10886 if (BNXT_VF(bp) && bp->vf.vlan)
10887 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
cf6645f8 10888#endif
c0c050c5
MC
10889 return features;
10890}
10891
10892static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10893{
10894 struct bnxt *bp = netdev_priv(dev);
10895 u32 flags = bp->flags;
10896 u32 changes;
10897 int rc = 0;
10898 bool re_init = false;
10899 bool update_tpa = false;
10900
10901 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
1054aee8 10902 if (features & NETIF_F_GRO_HW)
c0c050c5 10903 flags |= BNXT_FLAG_GRO;
1054aee8 10904 else if (features & NETIF_F_LRO)
c0c050c5
MC
10905 flags |= BNXT_FLAG_LRO;
10906
bdbd1eb5
MC
10907 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10908 flags &= ~BNXT_FLAG_TPA;
10909
a196e96b 10910 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
c0c050c5
MC
10911 flags |= BNXT_FLAG_STRIP_VLAN;
10912
10913 if (features & NETIF_F_NTUPLE)
10914 flags |= BNXT_FLAG_RFS;
10915
10916 changes = flags ^ bp->flags;
10917 if (changes & BNXT_FLAG_TPA) {
10918 update_tpa = true;
10919 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
f45b7b78
MC
10920 (flags & BNXT_FLAG_TPA) == 0 ||
10921 (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
10922 re_init = true;
10923 }
10924
10925 if (changes & ~BNXT_FLAG_TPA)
10926 re_init = true;
10927
10928 if (flags != bp->flags) {
10929 u32 old_flags = bp->flags;
10930
2bcfa6f6 10931 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
f45b7b78 10932 bp->flags = flags;
c0c050c5
MC
10933 if (update_tpa)
10934 bnxt_set_ring_params(bp);
10935 return rc;
10936 }
10937
10938 if (re_init) {
10939 bnxt_close_nic(bp, false, false);
f45b7b78 10940 bp->flags = flags;
c0c050c5
MC
10941 if (update_tpa)
10942 bnxt_set_ring_params(bp);
10943
10944 return bnxt_open_nic(bp, false, false);
10945 }
10946 if (update_tpa) {
f45b7b78 10947 bp->flags = flags;
c0c050c5
MC
10948 rc = bnxt_set_tpa(bp,
10949 (flags & BNXT_FLAG_TPA) ?
10950 true : false);
10951 if (rc)
10952 bp->flags = old_flags;
10953 }
10954 }
10955 return rc;
10956}
10957
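/* Walk the IPv6 extension header chain starting at nw_off and decide
 * whether the hardware can still offload this packet; for encapsulated
 * frames *nextp returns the location of the final next-header field so
 * the caller can check the inner protocol.
 */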
aa473d6c
MC
10958static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
10959 u8 **nextp)
10960{
10961 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
10962 int hdr_count = 0;
10963 u8 *nexthdr;
10964 int start;
10965
10966 /* Check that there are at most 2 IPv6 extension headers, no
10967 * fragment header, and each is <= 64 bytes.
10968 */
10969 start = nw_off + sizeof(*ip6h);
10970 nexthdr = &ip6h->nexthdr;
10971 while (ipv6_ext_hdr(*nexthdr)) {
10972 struct ipv6_opt_hdr *hp;
10973 int hdrlen;
10974
10975 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
10976 *nexthdr == NEXTHDR_FRAGMENT)
10977 return false;
10978 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
10979 skb_headlen(skb), NULL);
10980 if (!hp)
10981 return false;
10982 if (*nexthdr == NEXTHDR_AUTH)
10983 hdrlen = ipv6_authlen(hp);
10984 else
10985 hdrlen = ipv6_optlen(hp);
10986
10987 if (hdrlen > 64)
10988 return false;
10989 nexthdr = &hp->nexthdr;
10990 start += hdrlen;
10991 hdr_count++;
10992 }
10993 if (nextp) {
10994 /* Caller will check inner protocol */
10995 if (skb->encapsulation) {
10996 *nextp = nexthdr;
10997 return true;
10998 }
10999 *nextp = NULL;
11000 }
11001 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11002 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11003}
11004
11005/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
11006static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11007{
11008 struct udphdr *uh = udp_hdr(skb);
11009 __be16 udp_port = uh->dest;
11010
11011 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11012 return false;
11013 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11014 struct ethhdr *eh = inner_eth_hdr(skb);
11015
11016 switch (eh->h_proto) {
11017 case htons(ETH_P_IP):
11018 return true;
11019 case htons(ETH_P_IPV6):
11020 return bnxt_exthdr_check(bp, skb,
11021 skb_inner_network_offset(skb),
11022 NULL);
11023 }
11024 }
11025 return false;
11026}
11027
11028static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11029{
11030 switch (l4_proto) {
11031 case IPPROTO_UDP:
11032 return bnxt_udp_tunl_check(bp, skb);
11033 case IPPROTO_IPIP:
11034 return true;
11035 case IPPROTO_GRE: {
11036 switch (skb->inner_protocol) {
11037 default:
11038 return false;
11039 case htons(ETH_P_IP):
11040 return true;
11041 case htons(ETH_P_IPV6):
11042 fallthrough;
11043 }
11044 }
11045 case IPPROTO_IPV6:
11046 /* Check ext headers of inner ipv6 */
11047 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11048 NULL);
11049 }
11050 return false;
11051}
11052
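/* ndo_features_check() handler: clear the checksum and GSO offload bits
 * for tunneled or IPv6 packets whose headers the hardware cannot parse.
 */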
1698d600
MC
11053static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11054 struct net_device *dev,
11055 netdev_features_t features)
11056{
aa473d6c
MC
11057 struct bnxt *bp = netdev_priv(dev);
11058 u8 *l4_proto;
1698d600
MC
11059
11060 features = vlan_features_check(skb, features);
1698d600
MC
11061 switch (vlan_get_protocol(skb)) {
11062 case htons(ETH_P_IP):
aa473d6c
MC
11063 if (!skb->encapsulation)
11064 return features;
11065 l4_proto = &ip_hdr(skb)->protocol;
11066 if (bnxt_tunl_check(bp, skb, *l4_proto))
11067 return features;
1698d600
MC
11068 break;
11069 case htons(ETH_P_IPV6):
aa473d6c
MC
11070 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11071 &l4_proto))
11072 break;
11073 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11074 return features;
1698d600 11075 break;
1698d600 11076 }
1698d600
MC
11077 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11078}
11079
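/* Read num_words 32-bit words from the chip register space (CHIMP view)
 * into reg_buf via HWRM_DBG_READ_DIRECT, using a DMA slice of the request
 * as the bounce buffer.
 */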
b5d600b0
VV
11080int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11081 u32 *reg_buf)
11082{
bbf33d1d
EP
11083 struct hwrm_dbg_read_direct_output *resp;
11084 struct hwrm_dbg_read_direct_input *req;
b5d600b0
VV
11085 __le32 *dbg_reg_buf;
11086 dma_addr_t mapping;
11087 int rc, i;
11088
bbf33d1d
EP
11089 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11090 if (rc)
11091 return rc;
11092
11093 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11094 &mapping);
11095 if (!dbg_reg_buf) {
11096 rc = -ENOMEM;
11097 goto dbg_rd_reg_exit;
11098 }
11099
11100 req->host_dest_addr = cpu_to_le64(mapping);
11101
11102 resp = hwrm_req_hold(bp, req);
11103 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11104 req->read_len32 = cpu_to_le32(num_words);
11105
11106 rc = hwrm_req_send(bp, req);
b5d600b0
VV
11107 if (rc || resp->error_code) {
11108 rc = -EIO;
11109 goto dbg_rd_reg_exit;
11110 }
11111 for (i = 0; i < num_words; i++)
11112 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11113
11114dbg_rd_reg_exit:
bbf33d1d 11115 hwrm_req_drop(bp, req);
b5d600b0
VV
11116 return rc;
11117}
11118
ffd77621
MC
11119static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11120 u32 ring_id, u32 *prod, u32 *cons)
11121{
bbf33d1d
EP
11122 struct hwrm_dbg_ring_info_get_output *resp;
11123 struct hwrm_dbg_ring_info_get_input *req;
ffd77621
MC
11124 int rc;
11125
bbf33d1d
EP
11126 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11127 if (rc)
11128 return rc;
11129
11130 req->ring_type = ring_type;
11131 req->fw_ring_id = cpu_to_le32(ring_id);
11132 resp = hwrm_req_hold(bp, req);
11133 rc = hwrm_req_send(bp, req);
ffd77621
MC
11134 if (!rc) {
11135 *prod = le32_to_cpu(resp->producer_index);
11136 *cons = le32_to_cpu(resp->consumer_index);
11137 }
bbf33d1d 11138 hwrm_req_drop(bp, req);
ffd77621
MC
11139 return rc;
11140}
11141
9f554590
MC
11142static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11143{
b6ab4b01 11144 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9f554590
MC
11145 int i = bnapi->index;
11146
3b2b7d9d
MC
11147 if (!txr)
11148 return;
11149
9f554590
MC
11150 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11151 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11152 txr->tx_cons);
11153}
11154
11155static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11156{
b6ab4b01 11157 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9f554590
MC
11158 int i = bnapi->index;
11159
3b2b7d9d
MC
11160 if (!rxr)
11161 return;
11162
9f554590
MC
11163 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11164 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11165 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11166 rxr->rx_sw_agg_prod);
11167}
11168
11169static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11170{
11171 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11172 int i = bnapi->index;
11173
11174 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11175 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11176}
11177
c0c050c5
MC
11178static void bnxt_dbg_dump_states(struct bnxt *bp)
11179{
11180 int i;
11181 struct bnxt_napi *bnapi;
c0c050c5
MC
11182
11183 for (i = 0; i < bp->cp_nr_rings; i++) {
11184 bnapi = bp->bnapi[i];
c0c050c5 11185 if (netif_msg_drv(bp)) {
9f554590
MC
11186 bnxt_dump_tx_sw_state(bnapi);
11187 bnxt_dump_rx_sw_state(bnapi);
11188 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
11189 }
11190 }
11191}
11192
8fbf58e1
MC
11193static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11194{
11195 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
bbf33d1d 11196 struct hwrm_ring_reset_input *req;
8fbf58e1
MC
11197 struct bnxt_napi *bnapi = rxr->bnapi;
11198 struct bnxt_cp_ring_info *cpr;
11199 u16 cp_ring_id;
bbf33d1d
EP
11200 int rc;
11201
11202 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11203 if (rc)
11204 return rc;
8fbf58e1
MC
11205
11206 cpr = &bnapi->cp_ring;
11207 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
bbf33d1d
EP
11208 req->cmpl_ring = cpu_to_le16(cp_ring_id);
11209 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11210 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11211 return hwrm_req_send_silent(bp, req);
8fbf58e1
MC
11212}
11213
6988bd92 11214static void bnxt_reset_task(struct bnxt *bp, bool silent)
c0c050c5 11215{
6988bd92
MC
11216 if (!silent)
11217 bnxt_dbg_dump_states(bp);
028de140 11218 if (netif_running(bp->dev)) {
b386cd36
MC
11219 int rc;
11220
aa46dfff
VV
11221 if (silent) {
11222 bnxt_close_nic(bp, false, false);
11223 bnxt_open_nic(bp, false, false);
11224 } else {
b386cd36 11225 bnxt_ulp_stop(bp);
aa46dfff
VV
11226 bnxt_close_nic(bp, true, false);
11227 rc = bnxt_open_nic(bp, true, false);
11228 bnxt_ulp_start(bp, rc);
11229 }
028de140 11230 }
c0c050c5
MC
11231}
11232
0290bd29 11233static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
c0c050c5
MC
11234{
11235 struct bnxt *bp = netdev_priv(dev);
11236
11237 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11238 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 11239 bnxt_queue_sp_work(bp);
c0c050c5
MC
11240}
11241
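/* Timer-driven firmware health check: if the heartbeat register stops
 * advancing or the reset counter changes, schedule a firmware exception
 * event for the slow-path task.
 */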
acfb50e4
VV
11242static void bnxt_fw_health_check(struct bnxt *bp)
11243{
11244 struct bnxt_fw_health *fw_health = bp->fw_health;
11245 u32 val;
11246
0797c10d 11247 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
acfb50e4
VV
11248 return;
11249
11250 if (fw_health->tmr_counter) {
11251 fw_health->tmr_counter--;
11252 return;
11253 }
11254
11255 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11256 if (val == fw_health->last_fw_heartbeat)
11257 goto fw_reset;
11258
11259 fw_health->last_fw_heartbeat = val;
11260
11261 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11262 if (val != fw_health->last_fw_reset_cnt)
11263 goto fw_reset;
11264
11265 fw_health->tmr_counter = fw_health->tmr_multiplier;
11266 return;
11267
11268fw_reset:
11269 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11270 bnxt_queue_sp_work(bp);
11271}
11272
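/* Per-device periodic timer: queues slow-path work for stats collection,
 * PHY setting retries, flow stats, firmware health and coalescing checks,
 * then re-arms itself.
 */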
e99e88a9 11273static void bnxt_timer(struct timer_list *t)
c0c050c5 11274{
e99e88a9 11275 struct bnxt *bp = from_timer(bp, t, timer);
c0c050c5
MC
11276 struct net_device *dev = bp->dev;
11277
e0009404 11278 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
11279 return;
11280
11281 if (atomic_read(&bp->intr_sem) != 0)
11282 goto bnxt_restart_timer;
11283
acfb50e4
VV
11284 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11285 bnxt_fw_health_check(bp);
11286
fea6b333 11287 if (bp->link_info.link_up && bp->stats_coal_ticks) {
3bdf56c4 11288 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
c213eae8 11289 bnxt_queue_sp_work(bp);
3bdf56c4 11290 }
5a84acbe
SP
11291
11292 if (bnxt_tc_flower_enabled(bp)) {
11293 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11294 bnxt_queue_sp_work(bp);
11295 }
a1ef4a79 11296
87d67f59
PC
11297#ifdef CONFIG_RFS_ACCEL
11298 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11299 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11300 bnxt_queue_sp_work(bp);
11301 }
11302#endif /*CONFIG_RFS_ACCEL*/
11303
a1ef4a79
MC
11304 if (bp->link_info.phy_retry) {
11305 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
acda6180 11306 bp->link_info.phy_retry = false;
a1ef4a79
MC
11307 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11308 } else {
11309 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11310 bnxt_queue_sp_work(bp);
11311 }
11312 }
ffd77621 11313
5313845f
MC
11314 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11315 netif_carrier_ok(dev)) {
ffd77621
MC
11316 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11317 bnxt_queue_sp_work(bp);
11318 }
c0c050c5
MC
11319bnxt_restart_timer:
11320 mod_timer(&bp->timer, jiffies + bp->current_interval);
11321}
11322
a551ee94 11323static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6988bd92 11324{
a551ee94
MC
11325 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11326 * set. If the device is being closed, bnxt_close() may be holding
6988bd92
MC
11327 * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11328 * must clear BNXT_STATE_IN_SP_TASK before acquiring rtnl_lock().
11329 */
11330 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11331 rtnl_lock();
a551ee94
MC
11332}
11333
11334static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11335{
6988bd92
MC
11336 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11337 rtnl_unlock();
11338}
11339
a551ee94
MC
11340/* Only called from bnxt_sp_task() */
11341static void bnxt_reset(struct bnxt *bp, bool silent)
11342{
11343 bnxt_rtnl_lock_sp(bp);
11344 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11345 bnxt_reset_task(bp, silent);
11346 bnxt_rtnl_unlock_sp(bp);
11347}
11348
8fbf58e1
MC
11349/* Only called from bnxt_sp_task() */
11350static void bnxt_rx_ring_reset(struct bnxt *bp)
11351{
11352 int i;
11353
11354 bnxt_rtnl_lock_sp(bp);
11355 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11356 bnxt_rtnl_unlock_sp(bp);
11357 return;
11358 }
11359 /* Disable and flush TPA before resetting the RX ring */
11360 if (bp->flags & BNXT_FLAG_TPA)
11361 bnxt_set_tpa(bp, false);
11362 for (i = 0; i < bp->rx_nr_rings; i++) {
11363 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11364 struct bnxt_cp_ring_info *cpr;
11365 int rc;
11366
11367 if (!rxr->bnapi->in_reset)
11368 continue;
11369
11370 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11371 if (rc) {
11372 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11373 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11374 else
11375 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11376 rc);
8fb35cd3 11377 bnxt_reset_task(bp, true);
8fbf58e1
MC
11378 break;
11379 }
11380 bnxt_free_one_rx_ring_skbs(bp, i);
11381 rxr->rx_prod = 0;
11382 rxr->rx_agg_prod = 0;
11383 rxr->rx_sw_agg_prod = 0;
11384 rxr->rx_next_cons = 0;
11385 rxr->bnapi->in_reset = false;
11386 bnxt_alloc_one_rx_ring(bp, i);
11387 cpr = &rxr->bnapi->cp_ring;
8a27d4b9 11388 cpr->sw_stats.rx.rx_resets++;
8fbf58e1
MC
11389 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11390 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11391 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11392 }
11393 if (bp->flags & BNXT_FLAG_TPA)
11394 bnxt_set_tpa(bp, true);
11395 bnxt_rtnl_unlock_sp(bp);
11396}
11397
230d1f0d
MC
11398static void bnxt_fw_reset_close(struct bnxt *bp)
11399{
f3a6d206 11400 bnxt_ulp_stop(bp);
4f036b2e
MC
11401 /* When firmware is in fatal state, quiesce device and disable
11402 * bus master to prevent any potential bad DMAs before freeing
11403 * kernel memory.
d4073028 11404 */
4f036b2e 11405 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
dab62e7c
MC
11406 u16 val = 0;
11407
11408 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11409 if (val == 0xffff)
11410 bp->fw_reset_min_dsecs = 0;
4f036b2e
MC
11411 bnxt_tx_disable(bp);
11412 bnxt_disable_napi(bp);
11413 bnxt_disable_int_sync(bp);
11414 bnxt_free_irq(bp);
11415 bnxt_clear_int_mode(bp);
d4073028 11416 pci_disable_device(bp->pdev);
4f036b2e 11417 }
230d1f0d 11418 __bnxt_close_nic(bp, true, false);
ac797ced 11419 bnxt_vf_reps_free(bp);
230d1f0d
MC
11420 bnxt_clear_int_mode(bp);
11421 bnxt_hwrm_func_drv_unrgtr(bp);
d4073028
VV
11422 if (pci_is_enabled(bp->pdev))
11423 pci_disable_device(bp->pdev);
230d1f0d
MC
11424 bnxt_free_ctx_mem(bp);
11425 kfree(bp->ctx);
11426 bp->ctx = NULL;
11427}
11428
acfb50e4
VV
11429static bool is_bnxt_fw_ok(struct bnxt *bp)
11430{
11431 struct bnxt_fw_health *fw_health = bp->fw_health;
11432 bool no_heartbeat = false, has_reset = false;
11433 u32 val;
11434
11435 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11436 if (val == fw_health->last_fw_heartbeat)
11437 no_heartbeat = true;
11438
11439 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11440 if (val != fw_health->last_fw_reset_cnt)
11441 has_reset = true;
11442
11443 if (!no_heartbeat && has_reset)
11444 return true;
11445
11446 return false;
11447}
11448
d1db9e16
MC
11449/* rtnl_lock is acquired before calling this function */
11450static void bnxt_force_fw_reset(struct bnxt *bp)
11451{
11452 struct bnxt_fw_health *fw_health = bp->fw_health;
30e96f48 11453 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
d1db9e16
MC
11454 u32 wait_dsecs;
11455
11456 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11457 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11458 return;
11459
30e96f48
MC
11460 if (ptp) {
11461 spin_lock_bh(&ptp->ptp_lock);
11462 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11463 spin_unlock_bh(&ptp->ptp_lock);
11464 } else {
11465 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11466 }
d1db9e16
MC
11467 bnxt_fw_reset_close(bp);
11468 wait_dsecs = fw_health->master_func_wait_dsecs;
11469 if (fw_health->master) {
11470 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11471 wait_dsecs = 0;
11472 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11473 } else {
11474 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11475 wait_dsecs = fw_health->normal_func_wait_dsecs;
11476 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11477 }
4037eb71
VV
11478
11479 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
d1db9e16
MC
11480 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11481 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11482}
11483
11484void bnxt_fw_exception(struct bnxt *bp)
11485{
a2b31e27 11486 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
d1db9e16
MC
11487 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11488 bnxt_rtnl_lock_sp(bp);
11489 bnxt_force_fw_reset(bp);
11490 bnxt_rtnl_unlock_sp(bp);
11491}
11492
e72cb7d6
MC
11493/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11494 * < 0 on error.
11495 */
11496static int bnxt_get_registered_vfs(struct bnxt *bp)
230d1f0d 11497{
e72cb7d6 11498#ifdef CONFIG_BNXT_SRIOV
230d1f0d
MC
11499 int rc;
11500
e72cb7d6
MC
11501 if (!BNXT_PF(bp))
11502 return 0;
11503
11504 rc = bnxt_hwrm_func_qcfg(bp);
11505 if (rc) {
11506 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11507 return rc;
11508 }
11509 if (bp->pf.registered_vfs)
11510 return bp->pf.registered_vfs;
11511 if (bp->sriov_cfg)
11512 return 1;
11513#endif
11514 return 0;
11515}
11516
11517void bnxt_fw_reset(struct bnxt *bp)
11518{
230d1f0d
MC
11519 bnxt_rtnl_lock_sp(bp);
11520 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11521 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
30e96f48 11522 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
4037eb71 11523 int n = 0, tmo;
e72cb7d6 11524
30e96f48
MC
11525 if (ptp) {
11526 spin_lock_bh(&ptp->ptp_lock);
11527 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11528 spin_unlock_bh(&ptp->ptp_lock);
11529 } else {
11530 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11531 }
e72cb7d6
MC
11532 if (bp->pf.active_vfs &&
11533 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11534 n = bnxt_get_registered_vfs(bp);
11535 if (n < 0) {
11536 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11537 n);
11538 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11539 dev_close(bp->dev);
11540 goto fw_reset_exit;
11541 } else if (n > 0) {
11542 u16 vf_tmo_dsecs = n * 10;
11543
11544 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11545 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11546 bp->fw_reset_state =
11547 BNXT_FW_RESET_STATE_POLL_VF;
11548 bnxt_queue_fw_reset_work(bp, HZ / 10);
11549 goto fw_reset_exit;
230d1f0d
MC
11550 }
11551 bnxt_fw_reset_close(bp);
4037eb71
VV
11552 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11553 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11554 tmo = HZ / 10;
11555 } else {
11556 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11557 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11558 }
11559 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d
MC
11560 }
11561fw_reset_exit:
11562 bnxt_rtnl_unlock_sp(bp);
11563}
11564
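/* P5 chips only: look for completion rings that have work pending but
 * whose consumer index has not moved, query the ring state from firmware
 * and count the event as a missed IRQ.
 */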
ffd77621
MC
11565static void bnxt_chk_missed_irq(struct bnxt *bp)
11566{
11567 int i;
11568
11569 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11570 return;
11571
11572 for (i = 0; i < bp->cp_nr_rings; i++) {
11573 struct bnxt_napi *bnapi = bp->bnapi[i];
11574 struct bnxt_cp_ring_info *cpr;
11575 u32 fw_ring_id;
11576 int j;
11577
11578 if (!bnapi)
11579 continue;
11580
11581 cpr = &bnapi->cp_ring;
11582 for (j = 0; j < 2; j++) {
11583 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11584 u32 val[2];
11585
11586 if (!cpr2 || cpr2->has_more_work ||
11587 !bnxt_has_work(bp, cpr2))
11588 continue;
11589
11590 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11591 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11592 continue;
11593 }
11594 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11595 bnxt_dbg_hwrm_ring_info_get(bp,
11596 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11597 fw_ring_id, &val[0], &val[1]);
9d8b5f05 11598 cpr->sw_stats.cmn.missed_irqs++;
ffd77621
MC
11599 }
11600 }
11601}
11602
c0c050c5
MC
11603static void bnxt_cfg_ntp_filters(struct bnxt *);
11604
8119e49b
MC
11605static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11606{
11607 struct bnxt_link_info *link_info = &bp->link_info;
11608
11609 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11610 link_info->autoneg = BNXT_AUTONEG_SPEED;
11611 if (bp->hwrm_spec_code >= 0x10201) {
11612 if (link_info->auto_pause_setting &
11613 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11614 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11615 } else {
11616 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11617 }
11618 link_info->advertising = link_info->auto_link_speeds;
d058426e 11619 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
8119e49b
MC
11620 } else {
11621 link_info->req_link_speed = link_info->force_link_speed;
d058426e
EP
11622 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11623 if (link_info->force_pam4_link_speed) {
11624 link_info->req_link_speed =
11625 link_info->force_pam4_link_speed;
11626 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11627 }
8119e49b
MC
11628 link_info->req_duplex = link_info->duplex_setting;
11629 }
11630 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11631 link_info->req_flow_ctrl =
11632 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11633 else
11634 link_info->req_flow_ctrl = link_info->force_pause_setting;
11635}
11636
df97b34d
MC
11637static void bnxt_fw_echo_reply(struct bnxt *bp)
11638{
11639 struct bnxt_fw_health *fw_health = bp->fw_health;
bbf33d1d
EP
11640 struct hwrm_func_echo_response_input *req;
11641 int rc;
df97b34d 11642
bbf33d1d
EP
11643 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
11644 if (rc)
11645 return;
11646 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11647 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11648 hwrm_req_send(bp, req);
df97b34d
MC
11649}
11650
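/* Slow-path work handler: drains the sp_event bits set by the timer and
 * interrupt paths (rx-mode updates, stats, link changes, resets) while
 * BNXT_STATE_IN_SP_TASK is held.
 */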
c0c050c5
MC
11651static void bnxt_sp_task(struct work_struct *work)
11652{
11653 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
c0c050c5 11654
4cebdcec
MC
11655 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11656 smp_mb__after_atomic();
11657 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11658 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5 11659 return;
4cebdcec 11660 }
c0c050c5
MC
11661
11662 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11663 bnxt_cfg_rx_mode(bp);
11664
11665 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11666 bnxt_cfg_ntp_filters(bp);
c0c050c5
MC
11667 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11668 bnxt_hwrm_exec_fwd_req(bp);
00db3cba 11669 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
531d1d26
MC
11670 bnxt_hwrm_port_qstats(bp, 0);
11671 bnxt_hwrm_port_qstats_ext(bp, 0);
fea6b333 11672 bnxt_accumulate_all_stats(bp);
00db3cba 11673 }
3bdf56c4 11674
0eaa24b9 11675 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
e2dc9b6e 11676 int rc;
0eaa24b9 11677
e2dc9b6e 11678 mutex_lock(&bp->link_lock);
0eaa24b9
MC
11679 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11680 &bp->sp_event))
11681 bnxt_hwrm_phy_qcaps(bp);
11682
e2dc9b6e 11683 rc = bnxt_update_link(bp, true);
0eaa24b9
MC
11684 if (rc)
11685 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11686 rc);
ca0c7538
VV
11687
11688 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11689 &bp->sp_event))
11690 bnxt_init_ethtool_link_settings(bp);
11691 mutex_unlock(&bp->link_lock);
0eaa24b9 11692 }
a1ef4a79
MC
11693 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11694 int rc;
11695
11696 mutex_lock(&bp->link_lock);
11697 rc = bnxt_update_phy_setting(bp);
11698 mutex_unlock(&bp->link_lock);
11699 if (rc) {
11700 netdev_warn(bp->dev, "update phy settings retry failed\n");
11701 } else {
11702 bp->link_info.phy_retry = false;
11703 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11704 }
11705 }
90c694bb 11706 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
e2dc9b6e
MC
11707 mutex_lock(&bp->link_lock);
11708 bnxt_get_port_module_status(bp);
11709 mutex_unlock(&bp->link_lock);
90c694bb 11710 }
5a84acbe
SP
11711
11712 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11713 bnxt_tc_flow_stats_work(bp);
11714
ffd77621
MC
11715 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11716 bnxt_chk_missed_irq(bp);
11717
df97b34d
MC
11718 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11719 bnxt_fw_echo_reply(bp);
11720
e2dc9b6e
MC
11721 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
11722 * must be the last functions to be called before exiting.
11723 */
6988bd92
MC
11724 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11725 bnxt_reset(bp, false);
4cebdcec 11726
fc0f1929
MC
11727 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11728 bnxt_reset(bp, true);
11729
8fbf58e1
MC
11730 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11731 bnxt_rx_ring_reset(bp);
11732
657a33c8
VV
11733 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11734 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11735
acfb50e4
VV
11736 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11737 if (!is_bnxt_fw_ok(bp))
11738 bnxt_devlink_health_report(bp,
11739 BNXT_FW_EXCEPTION_SP_EVENT);
11740 }
11741
4cebdcec
MC
11742 smp_mb__before_atomic();
11743 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5
MC
11744}
11745
d1e7925e 11746/* Under rtnl_lock */
98fdbe73
MC
11747int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11748 int tx_xdp)
d1e7925e
MC
11749{
11750 int max_rx, max_tx, tx_sets = 1;
780baad4 11751 int tx_rings_needed, stats;
8f23d638 11752 int rx_rings = rx;
6fc2ffdf 11753 int cp, vnics, rc;
d1e7925e 11754
d1e7925e
MC
11755 if (tcs)
11756 tx_sets = tcs;
11757
11758 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11759 if (rc)
11760 return rc;
11761
11762 if (max_rx < rx)
11763 return -ENOMEM;
11764
5f449249 11765 tx_rings_needed = tx * tx_sets + tx_xdp;
d1e7925e
MC
11766 if (max_tx < tx_rings_needed)
11767 return -ENOMEM;
11768
6fc2ffdf 11769 vnics = 1;
9b3d15e6 11770 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
6fc2ffdf
EW
11771 vnics += rx_rings;
11772
8f23d638
MC
11773 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11774 rx_rings <<= 1;
11775 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
780baad4
VV
11776 stats = cp;
11777 if (BNXT_NEW_RM(bp)) {
11c3ec7b 11778 cp += bnxt_get_ulp_msix_num(bp);
780baad4
VV
11779 stats += bnxt_get_ulp_stat_ctxs(bp);
11780 }
6fc2ffdf 11781 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
780baad4 11782 stats, vnics);
d1e7925e
MC
11783}
11784
17086399
SP
11785static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11786{
11787 if (bp->bar2) {
11788 pci_iounmap(pdev, bp->bar2);
11789 bp->bar2 = NULL;
11790 }
11791
11792 if (bp->bar1) {
11793 pci_iounmap(pdev, bp->bar1);
11794 bp->bar1 = NULL;
11795 }
11796
11797 if (bp->bar0) {
11798 pci_iounmap(pdev, bp->bar0);
11799 bp->bar0 = NULL;
11800 }
11801}
11802
11803static void bnxt_cleanup_pci(struct bnxt *bp)
11804{
11805 bnxt_unmap_bars(bp, bp->pdev);
11806 pci_release_regions(bp->pdev);
f6824308
VV
11807 if (pci_is_enabled(bp->pdev))
11808 pci_disable_device(bp->pdev);
17086399
SP
11809}
11810
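/* Populate the default RX/TX interrupt coalescing parameters and the
 * statistics coalescing interval.
 */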
18775aa8
MC
11811static void bnxt_init_dflt_coal(struct bnxt *bp)
11812{
11813 struct bnxt_coal *coal;
11814
11815 /* Tick values in microseconds.
11816 * 1 coal_buf x bufs_per_record = 1 completion record.
11817 */
11818 coal = &bp->rx_coal;
0c2ff8d7 11819 coal->coal_ticks = 10;
18775aa8
MC
11820 coal->coal_bufs = 30;
11821 coal->coal_ticks_irq = 1;
11822 coal->coal_bufs_irq = 2;
05abe4dd 11823 coal->idle_thresh = 50;
18775aa8
MC
11824 coal->bufs_per_record = 2;
11825 coal->budget = 64; /* NAPI budget */
11826
11827 coal = &bp->tx_coal;
11828 coal->coal_ticks = 28;
11829 coal->coal_bufs = 30;
11830 coal->coal_ticks_irq = 2;
11831 coal->coal_bufs_irq = 2;
11832 coal->bufs_per_record = 1;
11833
11834 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11835}
11836
7c380918
MC
11837static int bnxt_fw_init_one_p1(struct bnxt *bp)
11838{
11839 int rc;
11840
11841 bp->fw_cap = 0;
11842 rc = bnxt_hwrm_ver_get(bp);
ba02629f
EP
11843 bnxt_try_map_fw_health_reg(bp);
11844 if (rc) {
b187e4ba
EP
11845 rc = bnxt_try_recover_fw(bp);
11846 if (rc)
11847 return rc;
11848 rc = bnxt_hwrm_ver_get(bp);
87f7ab8d
EP
11849 if (rc)
11850 return rc;
ba02629f 11851 }
7c380918 11852
4933f675
VV
11853 bnxt_nvm_cfg_ver_get(bp);
11854
7c380918
MC
11855 rc = bnxt_hwrm_func_reset(bp);
11856 if (rc)
11857 return -ENODEV;
11858
11859 bnxt_hwrm_fw_set_time(bp);
11860 return 0;
11861}
11862
11863static int bnxt_fw_init_one_p2(struct bnxt *bp)
11864{
11865 int rc;
11866
11867 /* Get the MAX capabilities for this function */
11868 rc = bnxt_hwrm_func_qcaps(bp);
11869 if (rc) {
11870 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11871 rc);
11872 return -ENODEV;
11873 }
11874
11875 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11876 if (rc)
11877 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11878 rc);
11879
3e9ec2bb
EP
11880 if (bnxt_alloc_fw_health(bp)) {
11881 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11882 } else {
11883 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11884 if (rc)
11885 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11886 rc);
11887 }
07f83d72 11888
2e882468 11889 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
7c380918
MC
11890 if (rc)
11891 return -ENODEV;
11892
11893 bnxt_hwrm_func_qcfg(bp);
11894 bnxt_hwrm_vnic_qcaps(bp);
11895 bnxt_hwrm_port_led_qcaps(bp);
11896 bnxt_ethtool_init(bp);
11897 bnxt_dcb_init(bp);
11898 return 0;
11899}
11900
ba642ab7
MC
11901static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11902{
11903 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11904 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11905 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11906 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11907 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
c66c06c5 11908 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
ba642ab7
MC
11909 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11910 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11911 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11912 }
11913}
11914
11915static void bnxt_set_dflt_rfs(struct bnxt *bp)
11916{
11917 struct net_device *dev = bp->dev;
11918
11919 dev->hw_features &= ~NETIF_F_NTUPLE;
11920 dev->features &= ~NETIF_F_NTUPLE;
11921 bp->flags &= ~BNXT_FLAG_RFS;
11922 if (bnxt_rfs_supported(bp)) {
11923 dev->hw_features |= NETIF_F_NTUPLE;
11924 if (bnxt_rfs_capable(bp)) {
11925 bp->flags |= BNXT_FLAG_RFS;
11926 dev->features |= NETIF_F_NTUPLE;
11927 }
11928 }
11929}
11930
11931static void bnxt_fw_init_one_p3(struct bnxt *bp)
11932{
11933 struct pci_dev *pdev = bp->pdev;
11934
11935 bnxt_set_dflt_rss_hash_type(bp);
11936 bnxt_set_dflt_rfs(bp);
11937
11938 bnxt_get_wol_settings(bp);
11939 if (bp->flags & BNXT_FLAG_WOL_CAP)
11940 device_set_wakeup_enable(&pdev->dev, bp->wol);
11941 else
11942 device_set_wakeup_capable(&pdev->dev, false);
11943
11944 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11945 bnxt_hwrm_coal_params_qcaps(bp);
11946}
11947
0afd6a4e
MC
11948static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11949
ec5d31e3
MC
11950static int bnxt_fw_init_one(struct bnxt *bp)
11951{
11952 int rc;
11953
11954 rc = bnxt_fw_init_one_p1(bp);
11955 if (rc) {
11956 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11957 return rc;
11958 }
11959 rc = bnxt_fw_init_one_p2(bp);
11960 if (rc) {
11961 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11962 return rc;
11963 }
0afd6a4e
MC
11964 rc = bnxt_probe_phy(bp, false);
11965 if (rc)
11966 return rc;
ec5d31e3
MC
11967 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11968 if (rc)
11969 return rc;
937f188c
VV
11970
11971 /* In case fw capabilities have changed, destroy the unneeded
11972 * reporters and create newly capable ones.
11973 */
11974 bnxt_dl_fw_reporters_destroy(bp, false);
11975 bnxt_dl_fw_reporters_create(bp);
ec5d31e3
MC
11976 bnxt_fw_init_one_p3(bp);
11977 return 0;
11978}
11979
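/* Execute one step of the firmware-supplied host reset sequence: write
 * the recorded value to a config-space, GRC window, or BAR register and
 * honor the optional per-step delay.
 */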
cbb51067
MC
11980static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11981{
11982 struct bnxt_fw_health *fw_health = bp->fw_health;
11983 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11984 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11985 u32 reg_type, reg_off, delay_msecs;
11986
11987 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11988 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11989 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11990 switch (reg_type) {
11991 case BNXT_FW_HEALTH_REG_TYPE_CFG:
11992 pci_write_config_dword(bp->pdev, reg_off, val);
11993 break;
11994 case BNXT_FW_HEALTH_REG_TYPE_GRC:
11995 writel(reg_off & BNXT_GRC_BASE_MASK,
11996 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11997 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
df561f66 11998 fallthrough;
cbb51067
MC
11999 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12000 writel(val, bp->bar0 + reg_off);
12001 break;
12002 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12003 writel(val, bp->bar1 + reg_off);
12004 break;
12005 }
12006 if (delay_msecs) {
12007 pci_read_config_dword(bp->pdev, 0, &val);
12008 msleep(delay_msecs);
12009 }
12010}
12011
12012static void bnxt_reset_all(struct bnxt *bp)
12013{
12014 struct bnxt_fw_health *fw_health = bp->fw_health;
e07ab202
VV
12015 int i, rc;
12016
12017 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
87f7ab8d 12018 bnxt_fw_reset_via_optee(bp);
e07ab202 12019 bp->fw_reset_timestamp = jiffies;
e07ab202
VV
12020 return;
12021 }
cbb51067
MC
12022
12023 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12024 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12025 bnxt_fw_reset_writel(bp, i);
12026 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
bbf33d1d
EP
12027 struct hwrm_fw_reset_input *req;
12028
12029 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12030 if (!rc) {
12031 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12032 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12033 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12034 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12035 rc = hwrm_req_send(bp, req);
12036 }
a2f3835c 12037 if (rc != -ENODEV)
cbb51067
MC
12038 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12039 }
12040 bp->fw_reset_timestamp = jiffies;
12041}
12042
339eeb4b
MC
12043static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12044{
12045 return time_after(jiffies, bp->fw_reset_timestamp +
12046 (bp->fw_reset_max_dsecs * HZ / 10));
12047}
12048
3958b1da
SK
12049static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12050{
12051 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12052 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12053 bnxt_ulp_start(bp, rc);
12054 bnxt_dl_health_status_update(bp, false);
12055 }
12056 bp->fw_reset_state = 0;
12057 dev_close(bp->dev);
12058}
12059
230d1f0d
MC
12060static void bnxt_fw_reset_task(struct work_struct *work)
12061{
12062 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
3958b1da 12063 int rc = 0;
230d1f0d
MC
12064
12065 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12066 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12067 return;
12068 }
12069
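	/* The firmware reset runs as a state machine: each state either
	 * advances bp->fw_reset_state and falls through to the next case,
	 * or re-queues this delayed work and returns to poll again later.
	 */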
12070 switch (bp->fw_reset_state) {
e72cb7d6
MC
12071 case BNXT_FW_RESET_STATE_POLL_VF: {
12072 int n = bnxt_get_registered_vfs(bp);
4037eb71 12073 int tmo;
e72cb7d6
MC
12074
12075 if (n < 0) {
230d1f0d 12076 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
e72cb7d6 12077 n, jiffies_to_msecs(jiffies -
230d1f0d
MC
12078 bp->fw_reset_timestamp));
12079 goto fw_reset_abort;
e72cb7d6 12080 } else if (n > 0) {
339eeb4b 12081 if (bnxt_fw_reset_timeout(bp)) {
230d1f0d
MC
12082 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12083 bp->fw_reset_state = 0;
e72cb7d6
MC
12084 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12085 n);
230d1f0d
MC
12086 return;
12087 }
12088 bnxt_queue_fw_reset_work(bp, HZ / 10);
12089 return;
12090 }
12091 bp->fw_reset_timestamp = jiffies;
12092 rtnl_lock();
6cd657cb 12093 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
3958b1da 12094 bnxt_fw_reset_abort(bp, rc);
6cd657cb 12095 rtnl_unlock();
3958b1da 12096 return;
6cd657cb 12097 }
230d1f0d 12098 bnxt_fw_reset_close(bp);
4037eb71
VV
12099 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12100 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12101 tmo = HZ / 10;
12102 } else {
12103 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12104 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12105 }
230d1f0d 12106 rtnl_unlock();
4037eb71 12107 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d 12108 return;
e72cb7d6 12109 }
4037eb71
VV
12110 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12111 u32 val;
12112
12113 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12114 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
339eeb4b 12115 !bnxt_fw_reset_timeout(bp)) {
4037eb71
VV
12116 bnxt_queue_fw_reset_work(bp, HZ / 5);
12117 return;
12118 }
12119
12120 if (!bp->fw_health->master) {
12121 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12122
12123 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12124 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12125 return;
12126 }
12127 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12128 }
df561f66 12129 fallthrough;
c6a9e7aa 12130 case BNXT_FW_RESET_STATE_RESET_FW:
cbb51067
MC
12131 bnxt_reset_all(bp);
12132 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
c6a9e7aa 12133 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
cbb51067 12134 return;
230d1f0d 12135 case BNXT_FW_RESET_STATE_ENABLE_DEV:
43a440c4 12136 bnxt_inv_fw_health_reg(bp);
bae8a003
VV
12137 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12138 !bp->fw_reset_min_dsecs) {
12139 u16 val;
12140
12141 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12142 if (val == 0xffff) {
12143 if (bnxt_fw_reset_timeout(bp)) {
12144 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
3958b1da 12145 rc = -ETIMEDOUT;
bae8a003 12146 goto fw_reset_abort;
dab62e7c 12147 }
bae8a003
VV
12148 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12149 return;
dab62e7c 12150 }
d1db9e16 12151 }
b4fff207 12152 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
230d1f0d
MC
12153 if (pci_enable_device(bp->pdev)) {
12154 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
3958b1da 12155 rc = -ENODEV;
230d1f0d
MC
12156 goto fw_reset_abort;
12157 }
12158 pci_set_master(bp->pdev);
12159 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
df561f66 12160 fallthrough;
230d1f0d
MC
12161 case BNXT_FW_RESET_STATE_POLL_FW:
12162 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
7b370ad7 12163 rc = bnxt_hwrm_poll(bp);
230d1f0d 12164 if (rc) {
339eeb4b 12165 if (bnxt_fw_reset_timeout(bp)) {
230d1f0d 12166 netdev_err(bp->dev, "Firmware reset aborted\n");
fc8864e0 12167 goto fw_reset_abort_status;
230d1f0d
MC
12168 }
12169 bnxt_queue_fw_reset_work(bp, HZ / 5);
12170 return;
12171 }
12172 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12173 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
df561f66 12174 fallthrough;
230d1f0d
MC
12175 case BNXT_FW_RESET_STATE_OPENING:
12176 while (!rtnl_trylock()) {
12177 bnxt_queue_fw_reset_work(bp, HZ / 10);
12178 return;
12179 }
12180 rc = bnxt_open(bp->dev);
12181 if (rc) {
3958b1da
SK
12182 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12183 bnxt_fw_reset_abort(bp, rc);
12184 rtnl_unlock();
12185 return;
230d1f0d 12186 }
230d1f0d
MC
12187
12188 bp->fw_reset_state = 0;
12189 /* Make sure fw_reset_state is 0 before clearing the flag */
12190 smp_mb__before_atomic();
12191 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
758684e4
SK
12192 bnxt_ulp_start(bp, 0);
12193 bnxt_reenable_sriov(bp);
ac797ced
SB
12194 bnxt_vf_reps_alloc(bp);
12195 bnxt_vf_reps_open(bp);
9e518f25 12196 bnxt_ptp_reapply_pps(bp);
737d7a6c 12197 bnxt_dl_health_recovery_done(bp);
e4e38237 12198 bnxt_dl_health_status_update(bp, true);
f3a6d206 12199 rtnl_unlock();
230d1f0d
MC
12200 break;
12201 }
12202 return;
12203
fc8864e0
MC
12204fw_reset_abort_status:
12205 if (bp->fw_health->status_reliable ||
12206 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12207 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12208
12209 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12210 }
230d1f0d 12211fw_reset_abort:
230d1f0d 12212 rtnl_lock();
3958b1da 12213 bnxt_fw_reset_abort(bp, rc);
230d1f0d
MC
12214 rtnl_unlock();
12215}
12216
c0c050c5
MC
12217static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12218{
12219 int rc;
12220 struct bnxt *bp = netdev_priv(dev);
12221
12222 SET_NETDEV_DEV(dev, &pdev->dev);
12223
12224 /* enable device (incl. PCI PM wakeup), and bus-mastering */
12225 rc = pci_enable_device(pdev);
12226 if (rc) {
12227 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12228 goto init_err;
12229 }
12230
12231 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12232 dev_err(&pdev->dev,
12233 "Cannot find PCI device base address, aborting\n");
12234 rc = -ENODEV;
12235 goto init_err_disable;
12236 }
12237
12238 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12239 if (rc) {
12240 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12241 goto init_err_disable;
12242 }
12243
12244 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12245 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12246 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
3383176e 12247 rc = -EIO;
c54bc3ce 12248 goto init_err_release;
c0c050c5
MC
12249 }
12250
12251 pci_set_master(pdev);
12252
12253 bp->dev = dev;
12254 bp->pdev = pdev;
12255
8ae24738
MC
12256 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12257 * determines the BAR size.
12258 */
c0c050c5
MC
12259 bp->bar0 = pci_ioremap_bar(pdev, 0);
12260 if (!bp->bar0) {
12261 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12262 rc = -ENOMEM;
12263 goto init_err_release;
12264 }
12265
c0c050c5
MC
12266 bp->bar2 = pci_ioremap_bar(pdev, 4);
12267 if (!bp->bar2) {
12268 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12269 rc = -ENOMEM;
12270 goto init_err_release;
12271 }
12272
6316ea6d
SB
12273 pci_enable_pcie_error_reporting(pdev);
12274
c0c050c5 12275 INIT_WORK(&bp->sp_task, bnxt_sp_task);
230d1f0d 12276 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
c0c050c5
MC
12277
12278 spin_lock_init(&bp->ntp_fltr_lock);
697197e5
MC
12279#if BITS_PER_LONG == 32
12280 spin_lock_init(&bp->db_lock);
12281#endif
c0c050c5
MC
12282
12283 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12284 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12285
18775aa8 12286 bnxt_init_dflt_coal(bp);
51f30785 12287
e99e88a9 12288 timer_setup(&bp->timer, bnxt_timer, 0);
c0c050c5
MC
12289 bp->current_interval = BNXT_TIMER_INTERVAL;
12290
442a35a5
JK
12291 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12292 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12293
caefe526 12294 clear_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
12295 return 0;
12296
12297init_err_release:
17086399 12298 bnxt_unmap_bars(bp, pdev);
c0c050c5
MC
12299 pci_release_regions(pdev);
12300
12301init_err_disable:
12302 pci_disable_device(pdev);
12303
12304init_err:
12305 return rc;
12306}
12307
12308/* rtnl_lock held */
12309static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12310{
12311 struct sockaddr *addr = p;
1fc2cfd0
JH
12312 struct bnxt *bp = netdev_priv(dev);
12313 int rc = 0;
c0c050c5
MC
12314
12315 if (!is_valid_ether_addr(addr->sa_data))
12316 return -EADDRNOTAVAIL;
12317
c1a7bdff
MC
12318 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12319 return 0;
12320
28ea334b 12321 rc = bnxt_approve_mac(bp, addr->sa_data, true);
84c33dd3
MC
12322 if (rc)
12323 return rc;
bdd4347b 12324
c0c050c5 12325 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1fc2cfd0
JH
12326 if (netif_running(dev)) {
12327 bnxt_close_nic(bp, false, false);
12328 rc = bnxt_open_nic(bp, false, false);
12329 }
c0c050c5 12330
1fc2cfd0 12331 return rc;
c0c050c5
MC
12332}
12333
12334/* rtnl_lock held */
12335static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12336{
12337 struct bnxt *bp = netdev_priv(dev);
12338
c0c050c5 12339 if (netif_running(dev))
a9b952d2 12340 bnxt_close_nic(bp, true, false);
c0c050c5
MC
12341
12342 dev->mtu = new_mtu;
12343 bnxt_set_ring_params(bp);
12344
12345 if (netif_running(dev))
a9b952d2 12346 return bnxt_open_nic(bp, true, false);
c0c050c5
MC
12347
12348 return 0;
12349}
12350
c5e3deb8 12351int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
c0c050c5
MC
12352{
12353 struct bnxt *bp = netdev_priv(dev);
3ffb6a39 12354 bool sh = false;
d1e7925e 12355 int rc;
16e5cc64 12356
c0c050c5 12357 if (tc > bp->max_tc) {
b451c8b6 12358 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
c0c050c5
MC
12359 tc, bp->max_tc);
12360 return -EINVAL;
12361 }
12362
12363 if (netdev_get_num_tc(dev) == tc)
12364 return 0;
12365
3ffb6a39
MC
12366 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12367 sh = true;
12368
98fdbe73
MC
12369 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12370 sh, tc, bp->tx_nr_rings_xdp);
d1e7925e
MC
12371 if (rc)
12372 return rc;
c0c050c5
MC
12373
12374 /* Changing TCs requires closing the device and re-allocating hw resources */
12375 if (netif_running(bp->dev))
12376 bnxt_close_nic(bp, true, false);
12377
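	/* Recompute the TX ring count from the per-TC count and the number
	 * of TCs (e.g. 8 rings per TC and 2 TCs gives 16 TX rings, plus any
	 * XDP rings), then derive the completion ring count: shared rings
	 * use max(rx, tx), otherwise rx + tx.
	 */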
12378 if (tc) {
12379 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12380 netdev_set_num_tc(dev, tc);
12381 } else {
12382 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12383 netdev_reset_tc(dev);
12384 }
87e9b377 12385 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
3ffb6a39
MC
12386 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12387 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5
MC
12388
12389 if (netif_running(bp->dev))
12390 return bnxt_open_nic(bp, true, false);
12391
12392 return 0;
12393}
12394
9e0fd15d
JP
12395static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12396 void *cb_priv)
c5e3deb8 12397{
9e0fd15d 12398 struct bnxt *bp = cb_priv;
de4784ca 12399
312324f1
JK
12400 if (!bnxt_tc_flower_enabled(bp) ||
12401 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
38cf0426 12402 return -EOPNOTSUPP;
c5e3deb8 12403
9e0fd15d
JP
12404 switch (type) {
12405 case TC_SETUP_CLSFLOWER:
12406 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12407 default:
12408 return -EOPNOTSUPP;
12409 }
12410}
12411
627c89d0 12412LIST_HEAD(bnxt_block_cb_list);
955bcb6e 12413
2ae7408f
SP
12414static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12415 void *type_data)
12416{
4e95bc26
PNA
12417 struct bnxt *bp = netdev_priv(dev);
12418
2ae7408f 12419 switch (type) {
9e0fd15d 12420 case TC_SETUP_BLOCK:
955bcb6e
PNA
12421 return flow_block_cb_setup_simple(type_data,
12422 &bnxt_block_cb_list,
4e95bc26
PNA
12423 bnxt_setup_tc_block_cb,
12424 bp, bp, true);
575ed7d3 12425 case TC_SETUP_QDISC_MQPRIO: {
2ae7408f
SP
12426 struct tc_mqprio_qopt *mqprio = type_data;
12427
12428 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
56f36acd 12429
2ae7408f
SP
12430 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12431 }
12432 default:
12433 return -EOPNOTSUPP;
12434 }
c5e3deb8
MC
12435}
12436
c0c050c5
MC
12437#ifdef CONFIG_RFS_ACCEL
12438static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12439 struct bnxt_ntuple_filter *f2)
12440{
12441 struct flow_keys *keys1 = &f1->fkeys;
12442 struct flow_keys *keys2 = &f2->fkeys;
12443
6fc7caa8
MC
12444 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12445 keys1->basic.ip_proto != keys2->basic.ip_proto)
12446 return false;
12447
12448 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12449 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12450 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12451 return false;
12452 } else {
12453 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12454 sizeof(keys1->addrs.v6addrs.src)) ||
12455 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12456 sizeof(keys1->addrs.v6addrs.dst)))
12457 return false;
12458 }
12459
12460 if (keys1->ports.ports == keys2->ports.ports &&
61aad724 12461 keys1->control.flags == keys2->control.flags &&
a54c4d74
MC
12462 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12463 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
c0c050c5
MC
12464 return true;
12465
12466 return false;
12467}
12468
12469static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12470 u16 rxq_index, u32 flow_id)
12471{
12472 struct bnxt *bp = netdev_priv(dev);
12473 struct bnxt_ntuple_filter *fltr, *new_fltr;
12474 struct flow_keys *fkeys;
12475 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
a54c4d74 12476 int rc = 0, idx, bit_id, l2_idx = 0;
c0c050c5 12477 struct hlist_head *head;
f47d0e19 12478 u32 flags;
c0c050c5 12479
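	/* If the destination MAC is not the primary MAC, it must match one
	 * of the configured unicast filters; remember that filter's index
	 * so the ntuple filter can be tied to the matching L2 filter.
	 */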
a54c4d74
MC
12480 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12481 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12482 int off = 0, j;
12483
12484 netif_addr_lock_bh(dev);
12485 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12486 if (ether_addr_equal(eth->h_dest,
12487 vnic->uc_list + off)) {
12488 l2_idx = j + 1;
12489 break;
12490 }
12491 }
12492 netif_addr_unlock_bh(dev);
12493 if (!l2_idx)
12494 return -EINVAL;
12495 }
c0c050c5
MC
12496 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12497 if (!new_fltr)
12498 return -ENOMEM;
12499
12500 fkeys = &new_fltr->fkeys;
12501 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12502 rc = -EPROTONOSUPPORT;
12503 goto err_free;
12504 }
12505
dda0e746
MC
12506 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12507 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
c0c050c5
MC
12508 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12509 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12510 rc = -EPROTONOSUPPORT;
12511 goto err_free;
12512 }
dda0e746
MC
12513 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12514 bp->hwrm_spec_code < 0x10601) {
12515 rc = -EPROTONOSUPPORT;
12516 goto err_free;
12517 }
f47d0e19
MC
12518 flags = fkeys->control.flags;
12519 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12520 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
61aad724
MC
12521 rc = -EPROTONOSUPPORT;
12522 goto err_free;
12523 }
c0c050c5 12524
a54c4d74 12525 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
c0c050c5
MC
12526 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12527
12528 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12529 head = &bp->ntp_fltr_hash_tbl[idx];
12530 rcu_read_lock();
12531 hlist_for_each_entry_rcu(fltr, head, hash) {
12532 if (bnxt_fltr_match(fltr, new_fltr)) {
12533 rcu_read_unlock();
12534 rc = 0;
12535 goto err_free;
12536 }
12537 }
12538 rcu_read_unlock();
12539
12540 spin_lock_bh(&bp->ntp_fltr_lock);
84e86b98
MC
12541 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12542 BNXT_NTP_FLTR_MAX_FLTR, 0);
12543 if (bit_id < 0) {
c0c050c5
MC
12544 spin_unlock_bh(&bp->ntp_fltr_lock);
12545 rc = -ENOMEM;
12546 goto err_free;
12547 }
12548
84e86b98 12549 new_fltr->sw_id = (u16)bit_id;
c0c050c5 12550 new_fltr->flow_id = flow_id;
a54c4d74 12551 new_fltr->l2_fltr_idx = l2_idx;
c0c050c5
MC
12552 new_fltr->rxq = rxq_index;
12553 hlist_add_head_rcu(&new_fltr->hash, head);
12554 bp->ntp_fltr_count++;
12555 spin_unlock_bh(&bp->ntp_fltr_lock);
12556
12557 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
c213eae8 12558 bnxt_queue_sp_work(bp);
c0c050c5
MC
12559
12560 return new_fltr->sw_id;
12561
12562err_free:
12563 kfree(new_fltr);
12564 return rc;
12565}
12566
12567static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12568{
12569 int i;
12570
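	/* Walk all hash buckets: program filters that are not yet in
	 * hardware, and free filters whose flows the stack reports as
	 * expired.
	 */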
12571 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12572 struct hlist_head *head;
12573 struct hlist_node *tmp;
12574 struct bnxt_ntuple_filter *fltr;
12575 int rc;
12576
12577 head = &bp->ntp_fltr_hash_tbl[i];
12578 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12579 bool del = false;
12580
12581 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12582 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12583 fltr->flow_id,
12584 fltr->sw_id)) {
12585 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12586 fltr);
12587 del = true;
12588 }
12589 } else {
12590 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12591 fltr);
12592 if (rc)
12593 del = true;
12594 else
12595 set_bit(BNXT_FLTR_VALID, &fltr->state);
12596 }
12597
12598 if (del) {
12599 spin_lock_bh(&bp->ntp_fltr_lock);
12600 hlist_del_rcu(&fltr->hash);
12601 bp->ntp_fltr_count--;
12602 spin_unlock_bh(&bp->ntp_fltr_lock);
12603 synchronize_rcu();
12604 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12605 kfree(fltr);
12606 }
12607 }
12608 }
19241368 12609 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
9a005c38 12610 netdev_info(bp->dev, "Received PF driver unload event!\n");
c0c050c5
MC
12611}
12612
12613#else
12614
12615static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12616{
12617}
12618
12619#endif /* CONFIG_RFS_ACCEL */
12620
442a35a5 12621static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
c0c050c5 12622{
442a35a5
JK
12623 struct bnxt *bp = netdev_priv(netdev);
12624 struct udp_tunnel_info ti;
12625 unsigned int cmd;
c0c050c5 12626
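	/* The udp_tunnel_nic core passes the table that changed; the
	 * returned tunnel info tells us whether it is the VXLAN or GENEVE
	 * port (tables defined in bnxt_udp_tunnels below).  A zero port
	 * means the entry was removed and the firmware port must be freed.
	 */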
442a35a5 12627 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
1698d600
MC
12628 if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
12629 bp->vxlan_port = ti.port;
442a35a5 12630 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
1698d600
MC
12631 } else {
12632 bp->nge_port = ti.port;
442a35a5 12633 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
1698d600 12634 }
7cdd5fc3 12635
442a35a5
JK
12636 if (ti.port)
12637 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
ad51b8e9 12638
442a35a5 12639 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
c0c050c5
MC
12640}
12641
442a35a5
JK
12642static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12643 .sync_table = bnxt_udp_tunnel_sync,
12644 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12645 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12646 .tables = {
12647 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
12648 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12649 },
12650};
c0c050c5 12651
39d8ba2e
MC
12652static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12653 struct net_device *dev, u32 filter_mask,
12654 int nlflags)
12655{
12656 struct bnxt *bp = netdev_priv(dev);
12657
12658 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12659 nlflags, filter_mask, NULL);
12660}
12661
12662static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
2fd527b7 12663 u16 flags, struct netlink_ext_ack *extack)
39d8ba2e
MC
12664{
12665 struct bnxt *bp = netdev_priv(dev);
12666 struct nlattr *attr, *br_spec;
12667 int rem, rc = 0;
12668
12669 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12670 return -EOPNOTSUPP;
12671
12672 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12673 if (!br_spec)
12674 return -EINVAL;
12675
12676 nla_for_each_nested(attr, br_spec, rem) {
12677 u16 mode;
12678
12679 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12680 continue;
12681
12682 if (nla_len(attr) < sizeof(mode))
12683 return -EINVAL;
12684
12685 mode = nla_get_u16(attr);
12686 if (mode == bp->br_mode)
12687 break;
12688
12689 rc = bnxt_hwrm_set_br_mode(bp, mode);
12690 if (!rc)
12691 bp->br_mode = mode;
12692 break;
12693 }
12694 return rc;
12695}
12696
52d5254a
FF
12697int bnxt_get_port_parent_id(struct net_device *dev,
12698 struct netdev_phys_item_id *ppid)
c124a62f 12699{
52d5254a
FF
12700 struct bnxt *bp = netdev_priv(dev);
12701
c124a62f
SP
12702 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12703 return -EOPNOTSUPP;
12704
12705 /* The PF and it's VF-reps only support the switchdev framework */
d061b241 12706 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
c124a62f
SP
12707 return -EOPNOTSUPP;
12708
b014232f
VV
12709 ppid->id_len = sizeof(bp->dsn);
12710 memcpy(ppid->id, bp->dsn, ppid->id_len);
c124a62f 12711
52d5254a 12712 return 0;
c124a62f
SP
12713}
12714
c9c49a65
JP
12715static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12716{
12717 struct bnxt *bp = netdev_priv(dev);
12718
12719 return &bp->dl_port;
12720}
12721
c0c050c5
MC
12722static const struct net_device_ops bnxt_netdev_ops = {
12723 .ndo_open = bnxt_open,
12724 .ndo_start_xmit = bnxt_start_xmit,
12725 .ndo_stop = bnxt_close,
12726 .ndo_get_stats64 = bnxt_get_stats64,
12727 .ndo_set_rx_mode = bnxt_set_rx_mode,
a7605370 12728 .ndo_eth_ioctl = bnxt_ioctl,
c0c050c5
MC
12729 .ndo_validate_addr = eth_validate_addr,
12730 .ndo_set_mac_address = bnxt_change_mac_addr,
12731 .ndo_change_mtu = bnxt_change_mtu,
12732 .ndo_fix_features = bnxt_fix_features,
12733 .ndo_set_features = bnxt_set_features,
1698d600 12734 .ndo_features_check = bnxt_features_check,
c0c050c5
MC
12735 .ndo_tx_timeout = bnxt_tx_timeout,
12736#ifdef CONFIG_BNXT_SRIOV
12737 .ndo_get_vf_config = bnxt_get_vf_config,
12738 .ndo_set_vf_mac = bnxt_set_vf_mac,
12739 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
12740 .ndo_set_vf_rate = bnxt_set_vf_bw,
12741 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
12742 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
746df139 12743 .ndo_set_vf_trust = bnxt_set_vf_trust,
c0c050c5
MC
12744#endif
12745 .ndo_setup_tc = bnxt_setup_tc,
12746#ifdef CONFIG_RFS_ACCEL
12747 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
12748#endif
f4e63525 12749 .ndo_bpf = bnxt_xdp,
f18c2b77 12750 .ndo_xdp_xmit = bnxt_xdp_xmit,
39d8ba2e
MC
12751 .ndo_bridge_getlink = bnxt_bridge_getlink,
12752 .ndo_bridge_setlink = bnxt_bridge_setlink,
c9c49a65 12753 .ndo_get_devlink_port = bnxt_get_devlink_port,
c0c050c5
MC
12754};
12755
12756static void bnxt_remove_one(struct pci_dev *pdev)
12757{
12758 struct net_device *dev = pci_get_drvdata(pdev);
12759 struct bnxt *bp = netdev_priv(dev);
12760
7e334fc8 12761 if (BNXT_PF(bp))
c0c050c5
MC
12762 bnxt_sriov_disable(bp);
12763
21d6a11e
VV
12764 if (BNXT_PF(bp))
12765 devlink_port_type_clear(&bp->dl_port);
93cb62d9 12766
a521c8a0 12767 bnxt_ptp_clear(bp);
21d6a11e
VV
12768 pci_disable_pcie_error_reporting(pdev);
12769 unregister_netdev(dev);
b16939b5 12770 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
21d6a11e 12771 /* Flush any pending tasks */
631ce27a
VV
12772 cancel_work_sync(&bp->sp_task);
12773 cancel_delayed_work_sync(&bp->fw_reset_task);
b16939b5
VV
12774 bp->sp_event = 0;
12775
7e334fc8 12776 bnxt_dl_fw_reporters_destroy(bp, true);
cda2cab0 12777 bnxt_dl_unregister(bp);
2ae7408f 12778 bnxt_shutdown_tc(bp);
c0c050c5 12779
7809592d 12780 bnxt_clear_int_mode(bp);
be58a0da 12781 bnxt_hwrm_func_drv_unrgtr(bp);
c0c050c5 12782 bnxt_free_hwrm_resources(bp);
eb513658 12783 bnxt_ethtool_free(bp);
7df4ae9f 12784 bnxt_dcb_free(bp);
a588e458
MC
12785 kfree(bp->edev);
12786 bp->edev = NULL;
ae5c42f0
MC
12787 kfree(bp->ptp_cfg);
12788 bp->ptp_cfg = NULL;
8280b38e
VV
12789 kfree(bp->fw_health);
12790 bp->fw_health = NULL;
c20dc142 12791 bnxt_cleanup_pci(bp);
98f04cf0
MC
12792 bnxt_free_ctx_mem(bp);
12793 kfree(bp->ctx);
12794 bp->ctx = NULL;
1667cbf6
MC
12795 kfree(bp->rss_indir_tbl);
12796 bp->rss_indir_tbl = NULL;
fd3ab1c7 12797 bnxt_free_port_stats(bp);
c0c050c5 12798 free_netdev(dev);
c0c050c5
MC
12799}
12800
ba642ab7 12801static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
c0c050c5
MC
12802{
12803 int rc = 0;
12804 struct bnxt_link_info *link_info = &bp->link_info;
c0c050c5 12805
b0d28207 12806 bp->phy_flags = 0;
170ce013
MC
12807 rc = bnxt_hwrm_phy_qcaps(bp);
12808 if (rc) {
12809 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12810 rc);
12811 return rc;
12812 }
dade5e15
MC
12813 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12814 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12815 else
12816 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
43a5107d
MC
12817 if (!fw_dflt)
12818 return 0;
12819
3c10ed49 12820 mutex_lock(&bp->link_lock);
c0c050c5
MC
12821 rc = bnxt_update_link(bp, false);
12822 if (rc) {
3c10ed49 12823 mutex_unlock(&bp->link_lock);
c0c050c5
MC
12824 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12825 rc);
12826 return rc;
12827 }
12828
93ed8117
MC
12829 /* Older firmware does not have supported_auto_speeds, so assume
12830 * that all supported speeds can be autonegotiated.
12831 */
12832 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12833 link_info->support_auto_speeds = link_info->support_speeds;
12834
8119e49b 12835 bnxt_init_ethtool_link_settings(bp);
3c10ed49 12836 mutex_unlock(&bp->link_lock);
ba642ab7 12837 return 0;
c0c050c5
MC
12838}
12839
12840static int bnxt_get_max_irq(struct pci_dev *pdev)
12841{
12842 u16 ctrl;
12843
12844 if (!pdev->msix_cap)
12845 return 1;
12846
12847 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12848 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
12849}
12850
6e6c5a57
MC
12851static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12852 int *max_cp)
c0c050c5 12853{
6a4f2947 12854 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
e30fbc33 12855 int max_ring_grps = 0, max_irq;
c0c050c5 12856
6a4f2947
MC
12857 *max_tx = hw_resc->max_tx_rings;
12858 *max_rx = hw_resc->max_rx_rings;
e30fbc33
MC
12859 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12860 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12861 bnxt_get_ulp_msix_num(bp),
c027c6b4 12862 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
e30fbc33
MC
12863 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12864 *max_cp = min_t(int, *max_cp, max_irq);
6a4f2947 12865 max_ring_grps = hw_resc->max_hw_ring_grps;
76595193
PS
12866 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12867 *max_cp -= 1;
12868 *max_rx -= 2;
12869 }
c0c050c5
MC
12870 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12871 *max_rx >>= 1;
e30fbc33
MC
12872 if (bp->flags & BNXT_FLAG_CHIP_P5) {
12873 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12874 /* On P5 chips, the max_cp output param is the number of available NQs */
12875 *max_cp = max_irq;
12876 }
b72d4a68 12877 *max_rx = min_t(int, *max_rx, max_ring_grps);
6e6c5a57
MC
12878}
12879
12880int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12881{
12882 int rx, tx, cp;
12883
12884 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
78f058a4
MC
12885 *max_rx = rx;
12886 *max_tx = tx;
6e6c5a57
MC
12887 if (!rx || !tx || !cp)
12888 return -ENOMEM;
12889
6e6c5a57
MC
12890 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12891}
12892
e4060d30
MC
12893static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12894 bool shared)
12895{
12896 int rc;
12897
12898 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
bdbd1eb5
MC
12899 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12900 /* Not enough rings, try disabling agg rings. */
12901 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12902 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
07f4fde5
MC
12903 if (rc) {
12904 /* set BNXT_FLAG_AGG_RINGS back for consistency */
12905 bp->flags |= BNXT_FLAG_AGG_RINGS;
bdbd1eb5 12906 return rc;
07f4fde5 12907 }
bdbd1eb5 12908 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
1054aee8
MC
12909 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12910 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bdbd1eb5
MC
12911 bnxt_set_ring_params(bp);
12912 }
e4060d30
MC
12913
12914 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12915 int max_cp, max_stat, max_irq;
12916
12917 /* Reserve minimum resources for RoCE */
12918 max_cp = bnxt_get_max_func_cp_rings(bp);
12919 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12920 max_irq = bnxt_get_max_func_irqs(bp);
12921 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12922 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12923 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12924 return 0;
12925
12926 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12927 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12928 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12929 max_cp = min_t(int, max_cp, max_irq);
12930 max_cp = min_t(int, max_cp, max_stat);
12931 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12932 if (rc)
12933 rc = 0;
12934 }
12935 return rc;
12936}
12937
58ea801a
MC
12938 /* In the initial default shared ring setting, each shared ring must have
12939 * an RX/TX ring pair.
12940 */
12941static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12942{
12943 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12944 bp->rx_nr_rings = bp->cp_nr_rings;
12945 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12946 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12947}
12948
702c221c 12949static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
6e6c5a57
MC
12950{
12951 int dflt_rings, max_rx_rings, max_tx_rings, rc;
6e6c5a57 12952
2773dfb2
MC
12953 if (!bnxt_can_reserve_rings(bp))
12954 return 0;
12955
6e6c5a57
MC
12956 if (sh)
12957 bp->flags |= BNXT_FLAG_SHARED_RINGS;
d629522e 12958 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
1d3ef13d
MC
12959 /* Reduce default rings on multi-port cards so that total default
12960 * rings do not exceed CPU count.
12961 */
12962 if (bp->port_count > 1) {
12963 int max_rings =
12964 max_t(int, num_online_cpus() / bp->port_count, 1);
12965
12966 dflt_rings = min_t(int, dflt_rings, max_rings);
12967 }
e4060d30 12968 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6e6c5a57
MC
12969 if (rc)
12970 return rc;
12971 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12972 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
58ea801a
MC
12973 if (sh)
12974 bnxt_trim_dflt_sh_rings(bp);
12975 else
12976 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12977 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
391be5c2 12978
674f50a5 12979 rc = __bnxt_reserve_rings(bp);
391be5c2
MC
12980 if (rc)
12981 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
58ea801a
MC
12982 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12983 if (sh)
12984 bnxt_trim_dflt_sh_rings(bp);
391be5c2 12985
674f50a5
MC
12986 /* Rings may have been trimmed, re-reserve the trimmed rings. */
12987 if (bnxt_need_reserve_rings(bp)) {
12988 rc = __bnxt_reserve_rings(bp);
12989 if (rc)
12990 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
12991 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12992 }
76595193
PS
12993 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12994 bp->rx_nr_rings++;
12995 bp->cp_nr_rings++;
12996 }
5d765a5e
VV
12997 if (rc) {
12998 bp->tx_nr_rings = 0;
12999 bp->rx_nr_rings = 0;
13000 }
6e6c5a57 13001 return rc;
c0c050c5
MC
13002}
13003
47558acd
MC
13004static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13005{
13006 int rc;
13007
13008 if (bp->tx_nr_rings)
13009 return 0;
13010
6b95c3e9
MC
13011 bnxt_ulp_irq_stop(bp);
13012 bnxt_clear_int_mode(bp);
47558acd
MC
13013 rc = bnxt_set_dflt_rings(bp, true);
13014 if (rc) {
13015 netdev_err(bp->dev, "Not enough rings available.\n");
6b95c3e9 13016 goto init_dflt_ring_err;
47558acd
MC
13017 }
13018 rc = bnxt_init_int_mode(bp);
13019 if (rc)
6b95c3e9
MC
13020 goto init_dflt_ring_err;
13021
47558acd
MC
13022 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13023 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
13024 bp->flags |= BNXT_FLAG_RFS;
13025 bp->dev->features |= NETIF_F_NTUPLE;
13026 }
6b95c3e9
MC
13027init_dflt_ring_err:
13028 bnxt_ulp_irq_restart(bp, rc);
13029 return rc;
47558acd
MC
13030}
13031
80fcaf46 13032int bnxt_restore_pf_fw_resources(struct bnxt *bp)
7b08f661 13033{
80fcaf46
MC
13034 int rc;
13035
7b08f661
MC
13036 ASSERT_RTNL();
13037 bnxt_hwrm_func_qcaps(bp);
1a037782
VD
13038
13039 if (netif_running(bp->dev))
13040 __bnxt_close_nic(bp, true, false);
13041
ec86f14e 13042 bnxt_ulp_irq_stop(bp);
80fcaf46
MC
13043 bnxt_clear_int_mode(bp);
13044 rc = bnxt_init_int_mode(bp);
ec86f14e 13045 bnxt_ulp_irq_restart(bp, rc);
1a037782
VD
13046
13047 if (netif_running(bp->dev)) {
13048 if (rc)
13049 dev_close(bp->dev);
13050 else
13051 rc = bnxt_open_nic(bp, true, false);
13052 }
13053
80fcaf46 13054 return rc;
7b08f661
MC
13055}
13056
a22a6ac2
MC
13057static int bnxt_init_mac_addr(struct bnxt *bp)
13058{
13059 int rc = 0;
13060
13061 if (BNXT_PF(bp)) {
13062 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
13063 } else {
13064#ifdef CONFIG_BNXT_SRIOV
13065 struct bnxt_vf_info *vf = &bp->vf;
28ea334b 13066 bool strict_approval = true;
a22a6ac2
MC
13067
13068 if (is_valid_ether_addr(vf->mac_addr)) {
91cdda40 13069 /* overwrite netdev dev_addr with admin VF MAC */
a22a6ac2 13070 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
28ea334b
MC
13071 /* Older PF driver or firmware may not approve this
13072 * correctly.
13073 */
13074 strict_approval = false;
a22a6ac2
MC
13075 } else {
13076 eth_hw_addr_random(bp->dev);
a22a6ac2 13077 }
28ea334b 13078 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
a22a6ac2
MC
13079#endif
13080 }
13081 return rc;
13082}
13083
197c316c 13084#define BNXT_VPD_LEN 512
a0d0fd70
VV
13085static void bnxt_vpd_read_info(struct bnxt *bp)
13086{
13087 struct pci_dev *pdev = bp->pdev;
54c0bcc0 13088 int i, len, pos, ro_size, size;
197c316c 13089 ssize_t vpd_size;
a0d0fd70
VV
13090 u8 *vpd_data;
13091
197c316c
DM
13092 vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
13093 if (!vpd_data)
a0d0fd70 13094 return;
197c316c
DM
13095
13096 vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
13097 if (vpd_size <= 0) {
13098 netdev_err(bp->dev, "Unable to read VPD\n");
13099 goto exit;
a0d0fd70
VV
13100 }
13101
54c0bcc0
DM
13102 i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
13103 if (i < 0) {
13104 netdev_err(bp->dev, "VPD READ-Only not found\n");
13105 goto exit;
13106 }
13107
54c0bcc0
DM
13114 ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
13115 i += PCI_VPD_LRDT_TAG_SIZE;
13116 if (i + ro_size > vpd_size)
13117 goto exit;
13118
13119 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13120 PCI_VPD_RO_KEYWORD_PARTNO);
a0d0fd70
VV
13121 if (pos < 0)
13122 goto read_sn;
13123
54c0bcc0
DM
13124 len = pci_vpd_info_field_size(&vpd_data[pos]);
13125 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13126 if (len + pos > vpd_size)
13127 goto read_sn;
13128
13129 size = min(len, BNXT_VPD_FLD_LEN - 1);
492adcf4 13130 memcpy(bp->board_partno, &vpd_data[pos], size);
a0d0fd70
VV
13131
13132read_sn:
54c0bcc0
DM
13133 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
13134 PCI_VPD_RO_KEYWORD_SERIALNO);
a0d0fd70
VV
13135 if (pos < 0)
13136 goto exit;
13137
54c0bcc0
DM
13138 len = pci_vpd_info_field_size(&vpd_data[pos]);
13139 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
13140 if (len + pos > vpd_size)
13141 goto exit;
13142
13143 size = min(len, BNXT_VPD_FLD_LEN - 1);
492adcf4 13144 memcpy(bp->board_serialno, &vpd_data[pos], size);
a0d0fd70
VV
13145exit:
13146 kfree(vpd_data);
13147}
13148
03213a99
JP
13149static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13150{
13151 struct pci_dev *pdev = bp->pdev;
8d85b75b 13152 u64 qword;
03213a99 13153
8d85b75b
JK
13154 qword = pci_get_dsn(pdev);
13155 if (!qword) {
13156 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
03213a99
JP
13157 return -EOPNOTSUPP;
13158 }
13159
8d85b75b
JK
13160 put_unaligned_le64(qword, dsn);
13161
d061b241 13162 bp->flags |= BNXT_FLAG_DSN_VALID;
03213a99
JP
13163 return 0;
13164}
13165
8ae24738
MC
13166static int bnxt_map_db_bar(struct bnxt *bp)
13167{
13168 if (!bp->db_size)
13169 return -ENODEV;
13170 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13171 if (!bp->bar1)
13172 return -ENOMEM;
13173 return 0;
13174}
13175
c0c050c5
MC
13176static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13177{
c0c050c5
MC
13178 struct net_device *dev;
13179 struct bnxt *bp;
6e6c5a57 13180 int rc, max_irqs;
c0c050c5 13181
4e00338a 13182 if (pci_is_bridge(pdev))
fa853dda
PS
13183 return -ENODEV;
13184
8743db4a
VV
13185 /* Clear any DMA transactions left pending by the crashed kernel
13186 * before loading the driver in the kdump capture kernel.
13187 */
13188 if (is_kdump_kernel()) {
13189 pci_clear_master(pdev);
13190 pcie_flr(pdev);
13191 }
13192
c0c050c5
MC
13193 max_irqs = bnxt_get_max_irq(pdev);
13194 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13195 if (!dev)
13196 return -ENOMEM;
13197
13198 bp = netdev_priv(dev);
8fb35cd3 13199 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
9c1fabdf 13200 bnxt_set_max_func_irqs(bp, max_irqs);
c0c050c5
MC
13201
13202 if (bnxt_vf_pciid(ent->driver_data))
13203 bp->flags |= BNXT_FLAG_VF;
13204
2bcfa6f6 13205 if (pdev->msix_cap)
c0c050c5 13206 bp->flags |= BNXT_FLAG_MSIX_CAP;
c0c050c5
MC
13207
13208 rc = bnxt_init_board(pdev, dev);
13209 if (rc < 0)
13210 goto init_err_free;
13211
13212 dev->netdev_ops = &bnxt_netdev_ops;
13213 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13214 dev->ethtool_ops = &bnxt_ethtool_ops;
c0c050c5
MC
13215 pci_set_drvdata(pdev, dev);
13216
3e8060fa
PS
13217 rc = bnxt_alloc_hwrm_resources(bp);
13218 if (rc)
17086399 13219 goto init_err_pci_clean;
3e8060fa
PS
13220
13221 mutex_init(&bp->hwrm_cmd_lock);
ba642ab7 13222 mutex_init(&bp->link_lock);
7c380918
MC
13223
13224 rc = bnxt_fw_init_one_p1(bp);
3e8060fa 13225 if (rc)
17086399 13226 goto init_err_pci_clean;
3e8060fa 13227
3e3c09b0
VV
13228 if (BNXT_PF(bp))
13229 bnxt_vpd_read_info(bp);
13230
9d6b648c 13231 if (BNXT_CHIP_P5(bp)) {
e38287b7 13232 bp->flags |= BNXT_FLAG_CHIP_P5;
9d6b648c
MC
13233 if (BNXT_CHIP_SR2(bp))
13234 bp->flags |= BNXT_FLAG_CHIP_SR2;
13235 }
e38287b7 13236
5fa65524
EP
13237 rc = bnxt_alloc_rss_indir_tbl(bp);
13238 if (rc)
13239 goto init_err_pci_clean;
13240
7c380918 13241 rc = bnxt_fw_init_one_p2(bp);
3c2217a6
MC
13242 if (rc)
13243 goto init_err_pci_clean;
13244
8ae24738
MC
13245 rc = bnxt_map_db_bar(bp);
13246 if (rc) {
13247 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13248 rc);
13249 goto init_err_pci_clean;
13250 }
13251
c0c050c5
MC
13252 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13253 NETIF_F_TSO | NETIF_F_TSO6 |
13254 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7e13318d 13255 NETIF_F_GSO_IPXIP4 |
152971ee
AD
13256 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13257 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
3e8060fa
PS
13258 NETIF_F_RXCSUM | NETIF_F_GRO;
13259
e38287b7 13260 if (BNXT_SUPPORTS_TPA(bp))
3e8060fa 13261 dev->hw_features |= NETIF_F_LRO;
c0c050c5 13262
c0c050c5
MC
13263 dev->hw_enc_features =
13264 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13265 NETIF_F_TSO | NETIF_F_TSO6 |
13266 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
152971ee 13267 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7e13318d 13268 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
442a35a5
JK
13269 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13270
152971ee
AD
13271 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13272 NETIF_F_GSO_GRE_CSUM;
c0c050c5 13273 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
1da63ddd
EP
13274 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13275 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13276 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13277 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
e38287b7 13278 if (BNXT_SUPPORTS_TPA(bp))
1054aee8 13279 dev->hw_features |= NETIF_F_GRO_HW;
c0c050c5 13280 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
1054aee8
MC
13281 if (dev->features & NETIF_F_GRO_HW)
13282 dev->features &= ~NETIF_F_LRO;
c0c050c5
MC
13283 dev->priv_flags |= IFF_UNICAST_FLT;
13284
13285#ifdef CONFIG_BNXT_SRIOV
13286 init_waitqueue_head(&bp->sriov_cfg_wait);
4ab0c6a8 13287 mutex_init(&bp->sriov_lock);
c0c050c5 13288#endif
e38287b7
MC
13289 if (BNXT_SUPPORTS_TPA(bp)) {
13290 bp->gro_func = bnxt_gro_func_5730x;
67912c36 13291 if (BNXT_CHIP_P4(bp))
e38287b7 13292 bp->gro_func = bnxt_gro_func_5731x;
67912c36
MC
13293 else if (BNXT_CHIP_P5(bp))
13294 bp->gro_func = bnxt_gro_func_5750x;
e38287b7
MC
13295 }
13296 if (!BNXT_CHIP_P4_PLUS(bp))
434c975a 13297 bp->flags |= BNXT_FLAG_DOUBLE_DB;
309369c9 13298
a22a6ac2
MC
13299 rc = bnxt_init_mac_addr(bp);
13300 if (rc) {
13301 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13302 rc = -EADDRNOTAVAIL;
13303 goto init_err_pci_clean;
13304 }
c0c050c5 13305
2e9217d1
VV
13306 if (BNXT_PF(bp)) {
13307 /* Read the adapter's DSN to use as the eswitch switch_id */
b014232f 13308 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
2e9217d1 13309 }
567b2abe 13310
7eb9bb3a
MC
13311 /* MTU range: 60 - FW defined max */
13312 dev->min_mtu = ETH_ZLEN;
13313 dev->max_mtu = bp->max_mtu;
13314
ba642ab7 13315 rc = bnxt_probe_phy(bp, true);
d5430d31
MC
13316 if (rc)
13317 goto init_err_pci_clean;
13318
c61fb99c 13319 bnxt_set_rx_skb_mode(bp, false);
c0c050c5
MC
13320 bnxt_set_tpa_flags(bp);
13321 bnxt_set_ring_params(bp);
702c221c 13322 rc = bnxt_set_dflt_rings(bp, true);
bdbd1eb5
MC
13323 if (rc) {
13324 netdev_err(bp->dev, "Not enough rings available.\n");
13325 rc = -ENOMEM;
17086399 13326 goto init_err_pci_clean;
bdbd1eb5 13327 }
c0c050c5 13328
ba642ab7 13329 bnxt_fw_init_one_p3(bp);
2bcfa6f6 13330
a196e96b 13331 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
c0c050c5
MC
13332 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13333
7809592d 13334 rc = bnxt_init_int_mode(bp);
c0c050c5 13335 if (rc)
17086399 13336 goto init_err_pci_clean;
c0c050c5 13337
832aed16
MC
13338 /* No TC has been set yet and rings may have been trimmed due to
13339 * limited MSIX, so we re-initialize the TX rings per TC.
13340 */
13341 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13342
c213eae8
MC
13343 if (BNXT_PF(bp)) {
13344 if (!bnxt_pf_wq) {
13345 bnxt_pf_wq =
13346 create_singlethread_workqueue("bnxt_pf_wq");
13347 if (!bnxt_pf_wq) {
13348 dev_err(&pdev->dev, "Unable to create workqueue.\n");
b5f796b6 13349 rc = -ENOMEM;
c213eae8
MC
13350 goto init_err_pci_clean;
13351 }
13352 }
18c7015c
JK
13353 rc = bnxt_init_tc(bp);
13354 if (rc)
13355 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13356 rc);
c213eae8 13357 }
2ae7408f 13358
190eda1a 13359 bnxt_inv_fw_health_reg(bp);
cda2cab0
VV
13360 bnxt_dl_register(bp);
13361
7809592d
MC
13362 rc = register_netdev(dev);
13363 if (rc)
cda2cab0 13364 goto init_err_cleanup;
7809592d 13365
cda2cab0
VV
13366 if (BNXT_PF(bp))
13367 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
7e334fc8 13368 bnxt_dl_fw_reporters_create(bp);
4ab0c6a8 13369
c0c050c5
MC
13370 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13371 board_info[ent->driver_data].name,
13372 (long)pci_resource_start(pdev, 0), dev->dev_addr);
af125b75 13373 pcie_print_link_status(pdev);
90c4f788 13374
df3875ec 13375 pci_save_state(pdev);
c0c050c5
MC
13376 return 0;
13377
cda2cab0
VV
13378init_err_cleanup:
13379 bnxt_dl_unregister(bp);
2ae7408f 13380 bnxt_shutdown_tc(bp);
7809592d
MC
13381 bnxt_clear_int_mode(bp);
13382
17086399 13383init_err_pci_clean:
bdb38602 13384 bnxt_hwrm_func_drv_unrgtr(bp);
a2bf74f4 13385 bnxt_free_hwrm_resources(bp);
03400aaa 13386 bnxt_ethtool_free(bp);
a521c8a0 13387 bnxt_ptp_clear(bp);
ae5c42f0
MC
13388 kfree(bp->ptp_cfg);
13389 bp->ptp_cfg = NULL;
07f83d72
MC
13390 kfree(bp->fw_health);
13391 bp->fw_health = NULL;
17086399 13392 bnxt_cleanup_pci(bp);
62bfb932
MC
13393 bnxt_free_ctx_mem(bp);
13394 kfree(bp->ctx);
13395 bp->ctx = NULL;
1667cbf6
MC
13396 kfree(bp->rss_indir_tbl);
13397 bp->rss_indir_tbl = NULL;
c0c050c5
MC
13398
13399init_err_free:
13400 free_netdev(dev);
13401 return rc;
13402}
13403
d196ece7
MC
13404static void bnxt_shutdown(struct pci_dev *pdev)
13405{
13406 struct net_device *dev = pci_get_drvdata(pdev);
13407 struct bnxt *bp;
13408
13409 if (!dev)
13410 return;
13411
13412 rtnl_lock();
13413 bp = netdev_priv(dev);
13414 if (!bp)
13415 goto shutdown_exit;
13416
13417 if (netif_running(dev))
13418 dev_close(dev);
13419
a7f3f939 13420 bnxt_ulp_shutdown(bp);
5567ae4a
VV
13421 bnxt_clear_int_mode(bp);
13422 pci_disable_device(pdev);
a7f3f939 13423
d196ece7 13424 if (system_state == SYSTEM_POWER_OFF) {
d196ece7
MC
13425 pci_wake_from_d3(pdev, bp->wol);
13426 pci_set_power_state(pdev, PCI_D3hot);
13427 }
13428
13429shutdown_exit:
13430 rtnl_unlock();
13431}
13432
f65a2044
MC
13433#ifdef CONFIG_PM_SLEEP
13434static int bnxt_suspend(struct device *device)
13435{
f521eaa9 13436 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
13437 struct bnxt *bp = netdev_priv(dev);
13438 int rc = 0;
13439
13440 rtnl_lock();
6a68749d 13441 bnxt_ulp_stop(bp);
f65a2044
MC
13442 if (netif_running(dev)) {
13443 netif_device_detach(dev);
13444 rc = bnxt_close(dev);
13445 }
13446 bnxt_hwrm_func_drv_unrgtr(bp);
ef02af8c 13447 pci_disable_device(bp->pdev);
f9b69d7f
VV
13448 bnxt_free_ctx_mem(bp);
13449 kfree(bp->ctx);
13450 bp->ctx = NULL;
f65a2044
MC
13451 rtnl_unlock();
13452 return rc;
13453}
13454
13455static int bnxt_resume(struct device *device)
13456{
f521eaa9 13457 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
13458 struct bnxt *bp = netdev_priv(dev);
13459 int rc = 0;
13460
13461 rtnl_lock();
ef02af8c
MC
13462 rc = pci_enable_device(bp->pdev);
13463 if (rc) {
13464 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13465 rc);
13466 goto resume_exit;
13467 }
13468 pci_set_master(bp->pdev);
f92335d8 13469 if (bnxt_hwrm_ver_get(bp)) {
f65a2044
MC
13470 rc = -ENODEV;
13471 goto resume_exit;
13472 }
13473 rc = bnxt_hwrm_func_reset(bp);
13474 if (rc) {
13475 rc = -EBUSY;
13476 goto resume_exit;
13477 }
f92335d8 13478
2084ccf6
MC
13479 rc = bnxt_hwrm_func_qcaps(bp);
13480 if (rc)
f9b69d7f 13481 goto resume_exit;
f92335d8
VV
13482
13483 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13484 rc = -ENODEV;
13485 goto resume_exit;
13486 }
13487
f65a2044
MC
13488 bnxt_get_wol_settings(bp);
13489 if (netif_running(dev)) {
13490 rc = bnxt_open(dev);
13491 if (!rc)
13492 netif_device_attach(dev);
13493 }
13494
13495resume_exit:
6a68749d 13496 bnxt_ulp_start(bp, rc);
59ae2101
MC
13497 if (!rc)
13498 bnxt_reenable_sriov(bp);
f65a2044
MC
13499 rtnl_unlock();
13500 return rc;
13501}
13502
13503static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13504#define BNXT_PM_OPS (&bnxt_pm_ops)
13505
13506#else
13507
13508#define BNXT_PM_OPS NULL
13509
13510#endif /* CONFIG_PM_SLEEP */
13511
6316ea6d
SB
13512/**
13513 * bnxt_io_error_detected - called when PCI error is detected
13514 * @pdev: Pointer to PCI device
13515 * @state: The current pci connection state
13516 *
13517 * This function is called after a PCI bus error affecting
13518 * this device has been detected.
13519 */
13520static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13521 pci_channel_state_t state)
13522{
13523 struct net_device *netdev = pci_get_drvdata(pdev);
a588e458 13524 struct bnxt *bp = netdev_priv(netdev);
6316ea6d
SB
13525
13526 netdev_info(netdev, "PCI I/O error detected\n");
13527
13528 rtnl_lock();
13529 netif_device_detach(netdev);
13530
a588e458
MC
13531 bnxt_ulp_stop(bp);
13532
6316ea6d
SB
13533 if (state == pci_channel_io_perm_failure) {
13534 rtnl_unlock();
13535 return PCI_ERS_RESULT_DISCONNECT;
13536 }
13537
f75d9a0a
VV
13538 if (state == pci_channel_io_frozen)
13539 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13540
6316ea6d
SB
13541 if (netif_running(netdev))
13542 bnxt_close(netdev);
13543
c81cfb62
KA
13544 if (pci_is_enabled(pdev))
13545 pci_disable_device(pdev);
6e2f8388
MC
13546 bnxt_free_ctx_mem(bp);
13547 kfree(bp->ctx);
13548 bp->ctx = NULL;
6316ea6d
SB
13549 rtnl_unlock();
13550
13551 /* Request a slot reset. */
13552 return PCI_ERS_RESULT_NEED_RESET;
13553}
13554
13555/**
13556 * bnxt_io_slot_reset - called after the pci bus has been reset.
13557 * @pdev: Pointer to PCI device
13558 *
13559 * Restart the card from scratch, as if from a cold-boot.
13560 * At this point, the card has experienced a hard reset,
13561 * followed by fixups by BIOS, and has its config space
13562 * set up identically to what it was at cold boot.
13563 */
13564static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13565{
fb1e6e56 13566 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
6316ea6d
SB
13567 struct net_device *netdev = pci_get_drvdata(pdev);
13568 struct bnxt *bp = netdev_priv(netdev);
f75d9a0a 13569 int err = 0, off;
6316ea6d
SB
13570
13571 netdev_info(bp->dev, "PCI Slot Reset\n");
13572
13573 rtnl_lock();
13574
13575 if (pci_enable_device(pdev)) {
13576 dev_err(&pdev->dev,
13577 "Cannot re-enable PCI device after reset.\n");
13578 } else {
13579 pci_set_master(pdev);
f75d9a0a
VV
13580 /* Upon a fatal error, the device's internal logic that latches the
13581 * BAR values is reset and is restored only by rewriting
13582 * the BARs.
13583 *
13584 * Since pci_restore_state() does not rewrite a BAR whose value
13585 * matches the saved value, the driver must write the BARs to 0
13586 * to force a restore after a fatal error.
13587 */
13588 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13589 &bp->state)) {
13590 for (off = PCI_BASE_ADDRESS_0;
13591 off <= PCI_BASE_ADDRESS_5; off += 4)
13592 pci_write_config_dword(bp->pdev, off, 0);
13593 }
df3875ec
VV
13594 pci_restore_state(pdev);
13595 pci_save_state(pdev);
6316ea6d 13596
aa8ed021 13597 err = bnxt_hwrm_func_reset(bp);
fb1e6e56 13598 if (!err)
6e2f8388 13599 result = PCI_ERS_RESULT_RECOVERED;
bae361c5 13600 }
6316ea6d
SB
13601
13602 rtnl_unlock();
13603
bae361c5 13604 return result;
6316ea6d
SB
13605}
13606
13607/**
13608 * bnxt_io_resume - called when traffic can start flowing again.
13609 * @pdev: Pointer to PCI device
13610 *
13611 * This callback is called when the error recovery driver tells
13612 * us that it's OK to resume normal operation.
13613 */
13614static void bnxt_io_resume(struct pci_dev *pdev)
13615{
13616 struct net_device *netdev = pci_get_drvdata(pdev);
fb1e6e56
VV
13617 struct bnxt *bp = netdev_priv(netdev);
13618 int err;
6316ea6d 13619
fb1e6e56 13620 netdev_info(bp->dev, "PCI Slot Resume\n");
6316ea6d
SB
13621 rtnl_lock();
13622
fb1e6e56
VV
13623 err = bnxt_hwrm_func_qcaps(bp);
13624 if (!err && netif_running(netdev))
13625 err = bnxt_open(netdev);
13626
13627 bnxt_ulp_start(bp, err);
13628 if (!err) {
13629 bnxt_reenable_sriov(bp);
13630 netif_device_attach(netdev);
13631 }
6316ea6d
SB
13632
13633 rtnl_unlock();
13634}
13635
13636static const struct pci_error_handlers bnxt_err_handler = {
13637 .error_detected = bnxt_io_error_detected,
13638 .slot_reset = bnxt_io_slot_reset,
13639 .resume = bnxt_io_resume
13640};
13641
c0c050c5
MC
13642static struct pci_driver bnxt_pci_driver = {
13643 .name = DRV_MODULE_NAME,
13644 .id_table = bnxt_pci_tbl,
13645 .probe = bnxt_init_one,
13646 .remove = bnxt_remove_one,
d196ece7 13647 .shutdown = bnxt_shutdown,
f65a2044 13648 .driver.pm = BNXT_PM_OPS,
6316ea6d 13649 .err_handler = &bnxt_err_handler,
c0c050c5
MC
13650#if defined(CONFIG_BNXT_SRIOV)
13651 .sriov_configure = bnxt_sriov_configure,
13652#endif
13653};
13654
c213eae8
MC
13655static int __init bnxt_init(void)
13656{
cabfb09d 13657 bnxt_debug_init();
c213eae8
MC
13658 return pci_register_driver(&bnxt_pci_driver);
13659}
13660
13661static void __exit bnxt_exit(void)
13662{
13663 pci_unregister_driver(&bnxt_pci_driver);
13664 if (bnxt_pf_wq)
13665 destroy_workqueue(bnxt_pf_wq);
cabfb09d 13666 bnxt_debug_exit();
c213eae8
MC
13667}
13668
13669module_init(bnxt_init);
13670module_exit(bnxt_exit);