]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/net/ethernet/broadcom/bnxt/bnxt.c
bnxt_en: Fix Priority Bytes and Packets counters in ethtool -S.
[mirror_ubuntu-hirsute-kernel.git] / drivers / net / ethernet / broadcom / bnxt / bnxt.c
CommitLineData
c0c050c5
MC
1/* Broadcom NetXtreme-C/E network driver.
2 *
11f15ed3 3 * Copyright (c) 2014-2016 Broadcom Corporation
c6cc32a2 4 * Copyright (c) 2016-2019 Broadcom Limited
c0c050c5
MC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12
13#include <linux/stringify.h>
14#include <linux/kernel.h>
15#include <linux/timer.h>
16#include <linux/errno.h>
17#include <linux/ioport.h>
18#include <linux/slab.h>
19#include <linux/vmalloc.h>
20#include <linux/interrupt.h>
21#include <linux/pci.h>
22#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/skbuff.h>
25#include <linux/dma-mapping.h>
26#include <linux/bitops.h>
27#include <linux/io.h>
28#include <linux/irq.h>
29#include <linux/delay.h>
30#include <asm/byteorder.h>
31#include <asm/page.h>
32#include <linux/time.h>
33#include <linux/mii.h>
0ca12be9 34#include <linux/mdio.h>
c0c050c5
MC
35#include <linux/if.h>
36#include <linux/if_vlan.h>
32e8239c 37#include <linux/if_bridge.h>
5ac67d8b 38#include <linux/rtc.h>
c6d30e83 39#include <linux/bpf.h>
c0c050c5
MC
40#include <net/ip.h>
41#include <net/tcp.h>
42#include <net/udp.h>
43#include <net/checksum.h>
44#include <net/ip6_checksum.h>
ad51b8e9 45#include <net/udp_tunnel.h>
c0c050c5
MC
46#include <linux/workqueue.h>
47#include <linux/prefetch.h>
48#include <linux/cache.h>
49#include <linux/log2.h>
50#include <linux/aer.h>
51#include <linux/bitmap.h>
52#include <linux/cpu_rmap.h>
56f0fd80 53#include <linux/cpumask.h>
2ae7408f 54#include <net/pkt_cls.h>
cde49a42
VV
55#include <linux/hwmon.h>
56#include <linux/hwmon-sysfs.h>
322b87ca 57#include <net/page_pool.h>
c0c050c5
MC
58
59#include "bnxt_hsi.h"
60#include "bnxt.h"
a588e458 61#include "bnxt_ulp.h"
c0c050c5
MC
62#include "bnxt_sriov.h"
63#include "bnxt_ethtool.h"
7df4ae9f 64#include "bnxt_dcb.h"
c6d30e83 65#include "bnxt_xdp.h"
4ab0c6a8 66#include "bnxt_vfr.h"
2ae7408f 67#include "bnxt_tc.h"
3c467bf3 68#include "bnxt_devlink.h"
cabfb09d 69#include "bnxt_debugfs.h"
c0c050c5
MC
70
71#define BNXT_TX_TIMEOUT (5 * HZ)
72
73static const char version[] =
74 "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
75
76MODULE_LICENSE("GPL");
77MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
78MODULE_VERSION(DRV_MODULE_VERSION);
79
80#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
81#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
82#define BNXT_RX_COPY_THRESH 256
83
4419dbe6 84#define BNXT_TX_PUSH_THRESH 164
c0c050c5
MC
85
86enum board_idx {
fbc9a523 87 BCM57301,
c0c050c5
MC
88 BCM57302,
89 BCM57304,
1f681688 90 BCM57417_NPAR,
fa853dda 91 BCM58700,
b24eb6ae
MC
92 BCM57311,
93 BCM57312,
fbc9a523 94 BCM57402,
c0c050c5
MC
95 BCM57404,
96 BCM57406,
1f681688
MC
97 BCM57402_NPAR,
98 BCM57407,
b24eb6ae
MC
99 BCM57412,
100 BCM57414,
101 BCM57416,
102 BCM57417,
1f681688 103 BCM57412_NPAR,
5049e33b 104 BCM57314,
1f681688
MC
105 BCM57417_SFP,
106 BCM57416_SFP,
107 BCM57404_NPAR,
108 BCM57406_NPAR,
109 BCM57407_SFP,
adbc8305 110 BCM57407_NPAR,
1f681688
MC
111 BCM57414_NPAR,
112 BCM57416_NPAR,
32b40798
DK
113 BCM57452,
114 BCM57454,
92abef36 115 BCM5745x_NPAR,
1ab968d2 116 BCM57508,
c6cc32a2 117 BCM57504,
51fec80d 118 BCM57502,
49c98421
MC
119 BCM57508_NPAR,
120 BCM57504_NPAR,
121 BCM57502_NPAR,
4a58139b 122 BCM58802,
8ed693b7 123 BCM58804,
4a58139b 124 BCM58808,
adbc8305
MC
125 NETXTREME_E_VF,
126 NETXTREME_C_VF,
618784e3 127 NETXTREME_S_VF,
b16b6891 128 NETXTREME_E_P5_VF,
c0c050c5
MC
129};
130
131/* indexed by enum above */
132static const struct {
133 char *name;
134} board_info[] = {
27573a7d
SB
135 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
136 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
137 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
138 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
139 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
140 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
141 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
142 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
143 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
144 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
145 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
146 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
147 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
148 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
149 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
150 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
151 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
152 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
153 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
154 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
155 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
156 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
157 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
158 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
159 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
160 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
161 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
162 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
92abef36 163 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
1ab968d2 164 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
c6cc32a2 165 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
51fec80d 166 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
49c98421
MC
167 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
168 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
169 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
27573a7d 170 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
8ed693b7 171 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
27573a7d
SB
172 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
173 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
174 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
618784e3 175 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
b16b6891 176 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
c0c050c5
MC
177};
178
179static const struct pci_device_id bnxt_pci_tbl[] = {
92abef36
VV
180 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
181 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
4a58139b 182 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
adbc8305 183 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
fbc9a523 184 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
c0c050c5
MC
185 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
186 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
1f681688 187 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
fa853dda 188 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
b24eb6ae
MC
189 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
190 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
fbc9a523 191 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
c0c050c5
MC
192 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
193 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
1f681688
MC
194 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
195 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
b24eb6ae
MC
196 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
197 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
198 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
199 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
1f681688 200 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
5049e33b 201 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
1f681688
MC
202 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
203 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
204 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
205 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
206 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
adbc8305
MC
207 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
208 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
1f681688 209 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
adbc8305 210 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
1f681688 211 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
adbc8305 212 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
4a58139b 213 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
32b40798 214 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
1ab968d2 215 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
c6cc32a2 216 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
51fec80d 217 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
49c98421
MC
218 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
219 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
220 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
221 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
222 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
223 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
4a58139b 224 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
8ed693b7 225 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
c0c050c5 226#ifdef CONFIG_BNXT_SRIOV
c7ef35eb
DK
227 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
228 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
adbc8305
MC
229 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
230 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
231 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
232 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
233 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
234 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
51fec80d 235 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
b16b6891 236 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
618784e3 237 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
c0c050c5
MC
238#endif
239 { 0 }
240};
241
242MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
243
244static const u16 bnxt_vf_req_snif[] = {
245 HWRM_FUNC_CFG,
91cdda40 246 HWRM_FUNC_VF_CFG,
c0c050c5
MC
247 HWRM_PORT_PHY_QCFG,
248 HWRM_CFA_L2_FILTER_ALLOC,
249};
250
25be8623 251static const u16 bnxt_async_events_arr[] = {
87c374de 252 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
b1613e78 253 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
87c374de
MC
254 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
255 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
256 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
257 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
b1613e78 258 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
2151fe08 259 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
7e914027 260 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
25be8623
MC
261};
262
c213eae8
MC
263static struct workqueue_struct *bnxt_pf_wq;
264
c0c050c5
MC
265static bool bnxt_vf_pciid(enum board_idx idx)
266{
618784e3 267 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
b16b6891 268 idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
c0c050c5
MC
269}
270
271#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
272#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
273#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
274
c0c050c5
MC
275#define BNXT_CP_DB_IRQ_DIS(db) \
276 writel(DB_CP_IRQ_DIS_FLAGS, db)
277
697197e5
MC
278#define BNXT_DB_CQ(db, idx) \
279 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
280
281#define BNXT_DB_NQ_P5(db, idx) \
282 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
283
284#define BNXT_DB_CQ_ARM(db, idx) \
285 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
286
287#define BNXT_DB_NQ_ARM_P5(db, idx) \
288 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
289
290static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
291{
292 if (bp->flags & BNXT_FLAG_CHIP_P5)
293 BNXT_DB_NQ_P5(db, idx);
294 else
295 BNXT_DB_CQ(db, idx);
296}
297
298static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
299{
300 if (bp->flags & BNXT_FLAG_CHIP_P5)
301 BNXT_DB_NQ_ARM_P5(db, idx);
302 else
303 BNXT_DB_CQ_ARM(db, idx);
304}
305
306static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
307{
308 if (bp->flags & BNXT_FLAG_CHIP_P5)
309 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
310 db->doorbell);
311 else
312 BNXT_DB_CQ(db, idx);
313}
314
38413406 315const u16 bnxt_lhint_arr[] = {
c0c050c5
MC
316 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
317 TX_BD_FLAGS_LHINT_512_TO_1023,
318 TX_BD_FLAGS_LHINT_1024_TO_2047,
319 TX_BD_FLAGS_LHINT_1024_TO_2047,
320 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
321 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
322 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
323 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
324 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
325 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
326 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
327 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
328 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
329 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
330 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
331 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
332 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
333 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
334 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
335};
336
ee5c7fb3
SP
337static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
338{
339 struct metadata_dst *md_dst = skb_metadata_dst(skb);
340
341 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
342 return 0;
343
344 return md_dst->u.port_info.port_id;
345}
346
c0c050c5
MC
347static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
348{
349 struct bnxt *bp = netdev_priv(dev);
350 struct tx_bd *txbd;
351 struct tx_bd_ext *txbd1;
352 struct netdev_queue *txq;
353 int i;
354 dma_addr_t mapping;
355 unsigned int length, pad = 0;
356 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
357 u16 prod, last_frag;
358 struct pci_dev *pdev = bp->pdev;
c0c050c5
MC
359 struct bnxt_tx_ring_info *txr;
360 struct bnxt_sw_tx_bd *tx_buf;
361
362 i = skb_get_queue_mapping(skb);
363 if (unlikely(i >= bp->tx_nr_rings)) {
364 dev_kfree_skb_any(skb);
365 return NETDEV_TX_OK;
366 }
367
c0c050c5 368 txq = netdev_get_tx_queue(dev, i);
a960dec9 369 txr = &bp->tx_ring[bp->tx_ring_map[i]];
c0c050c5
MC
370 prod = txr->tx_prod;
371
372 free_size = bnxt_tx_avail(bp, txr);
373 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
374 netif_tx_stop_queue(txq);
375 return NETDEV_TX_BUSY;
376 }
377
378 length = skb->len;
379 len = skb_headlen(skb);
380 last_frag = skb_shinfo(skb)->nr_frags;
381
382 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
383
384 txbd->tx_bd_opaque = prod;
385
386 tx_buf = &txr->tx_buf_ring[prod];
387 tx_buf->skb = skb;
388 tx_buf->nr_frags = last_frag;
389
390 vlan_tag_flags = 0;
ee5c7fb3 391 cfa_action = bnxt_xmit_get_cfa_action(skb);
c0c050c5
MC
392 if (skb_vlan_tag_present(skb)) {
393 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
394 skb_vlan_tag_get(skb);
395 /* Currently supports 8021Q, 8021AD vlan offloads
396 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
397 */
398 if (skb->vlan_proto == htons(ETH_P_8021Q))
399 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
400 }
401
402 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
4419dbe6
MC
403 struct tx_push_buffer *tx_push_buf = txr->tx_push;
404 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
405 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
697197e5 406 void __iomem *db = txr->tx_db.doorbell;
4419dbe6
MC
407 void *pdata = tx_push_buf->data;
408 u64 *end;
409 int j, push_len;
c0c050c5
MC
410
411 /* Set COAL_NOW to be ready quickly for the next push */
412 tx_push->tx_bd_len_flags_type =
413 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
414 TX_BD_TYPE_LONG_TX_BD |
415 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
416 TX_BD_FLAGS_COAL_NOW |
417 TX_BD_FLAGS_PACKET_END |
418 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
419
420 if (skb->ip_summed == CHECKSUM_PARTIAL)
421 tx_push1->tx_bd_hsize_lflags =
422 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
423 else
424 tx_push1->tx_bd_hsize_lflags = 0;
425
426 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
ee5c7fb3
SP
427 tx_push1->tx_bd_cfa_action =
428 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
c0c050c5 429
fbb0fa8b
MC
430 end = pdata + length;
431 end = PTR_ALIGN(end, 8) - 1;
4419dbe6
MC
432 *end = 0;
433
c0c050c5
MC
434 skb_copy_from_linear_data(skb, pdata, len);
435 pdata += len;
436 for (j = 0; j < last_frag; j++) {
437 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
438 void *fptr;
439
440 fptr = skb_frag_address_safe(frag);
441 if (!fptr)
442 goto normal_tx;
443
444 memcpy(pdata, fptr, skb_frag_size(frag));
445 pdata += skb_frag_size(frag);
446 }
447
4419dbe6
MC
448 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
449 txbd->tx_bd_haddr = txr->data_mapping;
c0c050c5
MC
450 prod = NEXT_TX(prod);
451 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
452 memcpy(txbd, tx_push1, sizeof(*txbd));
453 prod = NEXT_TX(prod);
4419dbe6 454 tx_push->doorbell =
c0c050c5
MC
455 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
456 txr->tx_prod = prod;
457
b9a8460a 458 tx_buf->is_push = 1;
c0c050c5 459 netdev_tx_sent_queue(txq, skb->len);
b9a8460a 460 wmb(); /* Sync is_push and byte queue before pushing data */
c0c050c5 461
4419dbe6
MC
462 push_len = (length + sizeof(*tx_push) + 7) / 8;
463 if (push_len > 16) {
697197e5
MC
464 __iowrite64_copy(db, tx_push_buf, 16);
465 __iowrite32_copy(db + 4, tx_push_buf + 1,
9d13744b 466 (push_len - 16) << 1);
4419dbe6 467 } else {
697197e5 468 __iowrite64_copy(db, tx_push_buf, push_len);
4419dbe6 469 }
c0c050c5 470
c0c050c5
MC
471 goto tx_done;
472 }
473
474normal_tx:
475 if (length < BNXT_MIN_PKT_SIZE) {
476 pad = BNXT_MIN_PKT_SIZE - length;
477 if (skb_pad(skb, pad)) {
478 /* SKB already freed. */
479 tx_buf->skb = NULL;
480 return NETDEV_TX_OK;
481 }
482 length = BNXT_MIN_PKT_SIZE;
483 }
484
485 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
486
487 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
488 dev_kfree_skb_any(skb);
489 tx_buf->skb = NULL;
490 return NETDEV_TX_OK;
491 }
492
493 dma_unmap_addr_set(tx_buf, mapping, mapping);
494 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
495 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
496
497 txbd->tx_bd_haddr = cpu_to_le64(mapping);
498
499 prod = NEXT_TX(prod);
500 txbd1 = (struct tx_bd_ext *)
501 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
502
503 txbd1->tx_bd_hsize_lflags = 0;
504 if (skb_is_gso(skb)) {
505 u32 hdr_len;
506
507 if (skb->encapsulation)
508 hdr_len = skb_inner_network_offset(skb) +
509 skb_inner_network_header_len(skb) +
510 inner_tcp_hdrlen(skb);
511 else
512 hdr_len = skb_transport_offset(skb) +
513 tcp_hdrlen(skb);
514
515 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
516 TX_BD_FLAGS_T_IPID |
517 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
518 length = skb_shinfo(skb)->gso_size;
519 txbd1->tx_bd_mss = cpu_to_le32(length);
520 length += hdr_len;
521 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
522 txbd1->tx_bd_hsize_lflags =
523 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
524 txbd1->tx_bd_mss = 0;
525 }
526
527 length >>= 9;
2b3c6885
MC
528 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
529 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
530 skb->len);
531 i = 0;
532 goto tx_dma_error;
533 }
c0c050c5
MC
534 flags |= bnxt_lhint_arr[length];
535 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
536
537 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
ee5c7fb3
SP
538 txbd1->tx_bd_cfa_action =
539 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
c0c050c5
MC
540 for (i = 0; i < last_frag; i++) {
541 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
542
543 prod = NEXT_TX(prod);
544 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
545
546 len = skb_frag_size(frag);
547 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
548 DMA_TO_DEVICE);
549
550 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
551 goto tx_dma_error;
552
553 tx_buf = &txr->tx_buf_ring[prod];
554 dma_unmap_addr_set(tx_buf, mapping, mapping);
555
556 txbd->tx_bd_haddr = cpu_to_le64(mapping);
557
558 flags = len << TX_BD_LEN_SHIFT;
559 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
560 }
561
562 flags &= ~TX_BD_LEN;
563 txbd->tx_bd_len_flags_type =
564 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
565 TX_BD_FLAGS_PACKET_END);
566
567 netdev_tx_sent_queue(txq, skb->len);
568
569 /* Sync BD data before updating doorbell */
570 wmb();
571
572 prod = NEXT_TX(prod);
573 txr->tx_prod = prod;
574
6b16f9ee 575 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
697197e5 576 bnxt_db_write(bp, &txr->tx_db, prod);
c0c050c5
MC
577
578tx_done:
579
c0c050c5 580 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
6b16f9ee 581 if (netdev_xmit_more() && !tx_buf->is_push)
697197e5 582 bnxt_db_write(bp, &txr->tx_db, prod);
4d172f21 583
c0c050c5
MC
584 netif_tx_stop_queue(txq);
585
586 /* netif_tx_stop_queue() must be done before checking
587 * tx index in bnxt_tx_avail() below, because in
588 * bnxt_tx_int(), we update tx index before checking for
589 * netif_tx_queue_stopped().
590 */
591 smp_mb();
592 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
593 netif_tx_wake_queue(txq);
594 }
595 return NETDEV_TX_OK;
596
597tx_dma_error:
598 last_frag = i;
599
600 /* start back at beginning and unmap skb */
601 prod = txr->tx_prod;
602 tx_buf = &txr->tx_buf_ring[prod];
603 tx_buf->skb = NULL;
604 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
605 skb_headlen(skb), PCI_DMA_TODEVICE);
606 prod = NEXT_TX(prod);
607
608 /* unmap remaining mapped pages */
609 for (i = 0; i < last_frag; i++) {
610 prod = NEXT_TX(prod);
611 tx_buf = &txr->tx_buf_ring[prod];
612 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
613 skb_frag_size(&skb_shinfo(skb)->frags[i]),
614 PCI_DMA_TODEVICE);
615 }
616
617 dev_kfree_skb_any(skb);
618 return NETDEV_TX_OK;
619}
620
621static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
622{
b6ab4b01 623 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
a960dec9 624 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
c0c050c5
MC
625 u16 cons = txr->tx_cons;
626 struct pci_dev *pdev = bp->pdev;
627 int i;
628 unsigned int tx_bytes = 0;
629
630 for (i = 0; i < nr_pkts; i++) {
631 struct bnxt_sw_tx_bd *tx_buf;
632 struct sk_buff *skb;
633 int j, last;
634
635 tx_buf = &txr->tx_buf_ring[cons];
636 cons = NEXT_TX(cons);
637 skb = tx_buf->skb;
638 tx_buf->skb = NULL;
639
640 if (tx_buf->is_push) {
641 tx_buf->is_push = 0;
642 goto next_tx_int;
643 }
644
645 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
646 skb_headlen(skb), PCI_DMA_TODEVICE);
647 last = tx_buf->nr_frags;
648
649 for (j = 0; j < last; j++) {
650 cons = NEXT_TX(cons);
651 tx_buf = &txr->tx_buf_ring[cons];
652 dma_unmap_page(
653 &pdev->dev,
654 dma_unmap_addr(tx_buf, mapping),
655 skb_frag_size(&skb_shinfo(skb)->frags[j]),
656 PCI_DMA_TODEVICE);
657 }
658
659next_tx_int:
660 cons = NEXT_TX(cons);
661
662 tx_bytes += skb->len;
663 dev_kfree_skb_any(skb);
664 }
665
666 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
667 txr->tx_cons = cons;
668
669 /* Need to make the tx_cons update visible to bnxt_start_xmit()
670 * before checking for netif_tx_queue_stopped(). Without the
671 * memory barrier, there is a small possibility that bnxt_start_xmit()
672 * will miss it and cause the queue to be stopped forever.
673 */
674 smp_mb();
675
676 if (unlikely(netif_tx_queue_stopped(txq)) &&
677 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
678 __netif_tx_lock(txq, smp_processor_id());
679 if (netif_tx_queue_stopped(txq) &&
680 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
681 txr->dev_state != BNXT_DEV_STATE_CLOSING)
682 netif_tx_wake_queue(txq);
683 __netif_tx_unlock(txq);
684 }
685}
686
c61fb99c 687static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
322b87ca 688 struct bnxt_rx_ring_info *rxr,
c61fb99c
MC
689 gfp_t gfp)
690{
691 struct device *dev = &bp->pdev->dev;
692 struct page *page;
693
322b87ca 694 page = page_pool_dev_alloc_pages(rxr->page_pool);
c61fb99c
MC
695 if (!page)
696 return NULL;
697
c519fe9a
SN
698 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
699 DMA_ATTR_WEAK_ORDERING);
c61fb99c 700 if (dma_mapping_error(dev, *mapping)) {
322b87ca 701 page_pool_recycle_direct(rxr->page_pool, page);
c61fb99c
MC
702 return NULL;
703 }
704 *mapping += bp->rx_dma_offset;
705 return page;
706}
707
c0c050c5
MC
708static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
709 gfp_t gfp)
710{
711 u8 *data;
712 struct pci_dev *pdev = bp->pdev;
713
714 data = kmalloc(bp->rx_buf_size, gfp);
715 if (!data)
716 return NULL;
717
c519fe9a
SN
718 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
719 bp->rx_buf_use_size, bp->rx_dir,
720 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
721
722 if (dma_mapping_error(&pdev->dev, *mapping)) {
723 kfree(data);
724 data = NULL;
725 }
726 return data;
727}
728
38413406
MC
729int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
730 u16 prod, gfp_t gfp)
c0c050c5
MC
731{
732 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
733 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
c0c050c5
MC
734 dma_addr_t mapping;
735
c61fb99c 736 if (BNXT_RX_PAGE_MODE(bp)) {
322b87ca
AG
737 struct page *page =
738 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
c0c050c5 739
c61fb99c
MC
740 if (!page)
741 return -ENOMEM;
742
743 rx_buf->data = page;
744 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
745 } else {
746 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
747
748 if (!data)
749 return -ENOMEM;
750
751 rx_buf->data = data;
752 rx_buf->data_ptr = data + bp->rx_offset;
753 }
11cd119d 754 rx_buf->mapping = mapping;
c0c050c5
MC
755
756 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
c0c050c5
MC
757 return 0;
758}
759
c6d30e83 760void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
c0c050c5
MC
761{
762 u16 prod = rxr->rx_prod;
763 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
764 struct rx_bd *cons_bd, *prod_bd;
765
766 prod_rx_buf = &rxr->rx_buf_ring[prod];
767 cons_rx_buf = &rxr->rx_buf_ring[cons];
768
769 prod_rx_buf->data = data;
6bb19474 770 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
c0c050c5 771
11cd119d 772 prod_rx_buf->mapping = cons_rx_buf->mapping;
c0c050c5
MC
773
774 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
775 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
776
777 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
778}
779
780static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
781{
782 u16 next, max = rxr->rx_agg_bmap_size;
783
784 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
785 if (next >= max)
786 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
787 return next;
788}
789
790static inline int bnxt_alloc_rx_page(struct bnxt *bp,
791 struct bnxt_rx_ring_info *rxr,
792 u16 prod, gfp_t gfp)
793{
794 struct rx_bd *rxbd =
795 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
796 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
797 struct pci_dev *pdev = bp->pdev;
798 struct page *page;
799 dma_addr_t mapping;
800 u16 sw_prod = rxr->rx_sw_agg_prod;
89d0a06c 801 unsigned int offset = 0;
c0c050c5 802
89d0a06c
MC
803 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
804 page = rxr->rx_page;
805 if (!page) {
806 page = alloc_page(gfp);
807 if (!page)
808 return -ENOMEM;
809 rxr->rx_page = page;
810 rxr->rx_page_offset = 0;
811 }
812 offset = rxr->rx_page_offset;
813 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
814 if (rxr->rx_page_offset == PAGE_SIZE)
815 rxr->rx_page = NULL;
816 else
817 get_page(page);
818 } else {
819 page = alloc_page(gfp);
820 if (!page)
821 return -ENOMEM;
822 }
c0c050c5 823
c519fe9a
SN
824 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
825 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
826 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
827 if (dma_mapping_error(&pdev->dev, mapping)) {
828 __free_page(page);
829 return -EIO;
830 }
831
832 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
833 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
834
835 __set_bit(sw_prod, rxr->rx_agg_bmap);
836 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
837 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
838
839 rx_agg_buf->page = page;
89d0a06c 840 rx_agg_buf->offset = offset;
c0c050c5
MC
841 rx_agg_buf->mapping = mapping;
842 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
843 rxbd->rx_bd_opaque = sw_prod;
844 return 0;
845}
846
4a228a3a
MC
847static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
848 struct bnxt_cp_ring_info *cpr,
849 u16 cp_cons, u16 curr)
850{
851 struct rx_agg_cmp *agg;
852
853 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
854 agg = (struct rx_agg_cmp *)
855 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
856 return agg;
857}
858
bfcd8d79
MC
859static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
860 struct bnxt_rx_ring_info *rxr,
861 u16 agg_id, u16 curr)
862{
863 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
864
865 return &tpa_info->agg_arr[curr];
866}
867
4a228a3a
MC
868static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
869 u16 start, u32 agg_bufs, bool tpa)
c0c050c5 870{
e44758b7 871 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5 872 struct bnxt *bp = bnapi->bp;
b6ab4b01 873 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
874 u16 prod = rxr->rx_agg_prod;
875 u16 sw_prod = rxr->rx_sw_agg_prod;
bfcd8d79 876 bool p5_tpa = false;
c0c050c5
MC
877 u32 i;
878
bfcd8d79
MC
879 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
880 p5_tpa = true;
881
c0c050c5
MC
882 for (i = 0; i < agg_bufs; i++) {
883 u16 cons;
884 struct rx_agg_cmp *agg;
885 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
886 struct rx_bd *prod_bd;
887 struct page *page;
888
bfcd8d79
MC
889 if (p5_tpa)
890 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
891 else
892 agg = bnxt_get_agg(bp, cpr, idx, start + i);
c0c050c5
MC
893 cons = agg->rx_agg_cmp_opaque;
894 __clear_bit(cons, rxr->rx_agg_bmap);
895
896 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
897 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
898
899 __set_bit(sw_prod, rxr->rx_agg_bmap);
900 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
901 cons_rx_buf = &rxr->rx_agg_ring[cons];
902
903 /* It is possible for sw_prod to be equal to cons, so
904 * set cons_rx_buf->page to NULL first.
905 */
906 page = cons_rx_buf->page;
907 cons_rx_buf->page = NULL;
908 prod_rx_buf->page = page;
89d0a06c 909 prod_rx_buf->offset = cons_rx_buf->offset;
c0c050c5
MC
910
911 prod_rx_buf->mapping = cons_rx_buf->mapping;
912
913 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
914
915 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
916 prod_bd->rx_bd_opaque = sw_prod;
917
918 prod = NEXT_RX_AGG(prod);
919 sw_prod = NEXT_RX_AGG(sw_prod);
c0c050c5
MC
920 }
921 rxr->rx_agg_prod = prod;
922 rxr->rx_sw_agg_prod = sw_prod;
923}
924
c61fb99c
MC
925static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
926 struct bnxt_rx_ring_info *rxr,
927 u16 cons, void *data, u8 *data_ptr,
928 dma_addr_t dma_addr,
929 unsigned int offset_and_len)
930{
931 unsigned int payload = offset_and_len >> 16;
932 unsigned int len = offset_and_len & 0xffff;
d7840976 933 skb_frag_t *frag;
c61fb99c
MC
934 struct page *page = data;
935 u16 prod = rxr->rx_prod;
936 struct sk_buff *skb;
937 int off, err;
938
939 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
940 if (unlikely(err)) {
941 bnxt_reuse_rx_data(rxr, cons, data);
942 return NULL;
943 }
944 dma_addr -= bp->rx_dma_offset;
c519fe9a
SN
945 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
946 DMA_ATTR_WEAK_ORDERING);
3071c517 947 page_pool_release_page(rxr->page_pool, page);
c61fb99c
MC
948
949 if (unlikely(!payload))
c43f1255 950 payload = eth_get_headlen(bp->dev, data_ptr, len);
c61fb99c
MC
951
952 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
953 if (!skb) {
954 __free_page(page);
955 return NULL;
956 }
957
958 off = (void *)data_ptr - page_address(page);
959 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
960 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
961 payload + NET_IP_ALIGN);
962
963 frag = &skb_shinfo(skb)->frags[0];
964 skb_frag_size_sub(frag, payload);
b54c9d5b 965 skb_frag_off_add(frag, payload);
c61fb99c
MC
966 skb->data_len -= payload;
967 skb->tail += payload;
968
969 return skb;
970}
971
c0c050c5
MC
972static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
973 struct bnxt_rx_ring_info *rxr, u16 cons,
6bb19474
MC
974 void *data, u8 *data_ptr,
975 dma_addr_t dma_addr,
976 unsigned int offset_and_len)
c0c050c5 977{
6bb19474 978 u16 prod = rxr->rx_prod;
c0c050c5 979 struct sk_buff *skb;
6bb19474 980 int err;
c0c050c5
MC
981
982 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
983 if (unlikely(err)) {
984 bnxt_reuse_rx_data(rxr, cons, data);
985 return NULL;
986 }
987
988 skb = build_skb(data, 0);
c519fe9a
SN
989 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
990 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
991 if (!skb) {
992 kfree(data);
993 return NULL;
994 }
995
b3dba77c 996 skb_reserve(skb, bp->rx_offset);
6bb19474 997 skb_put(skb, offset_and_len & 0xffff);
c0c050c5
MC
998 return skb;
999}
1000
e44758b7
MC
1001static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1002 struct bnxt_cp_ring_info *cpr,
4a228a3a
MC
1003 struct sk_buff *skb, u16 idx,
1004 u32 agg_bufs, bool tpa)
c0c050c5 1005{
e44758b7 1006 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5 1007 struct pci_dev *pdev = bp->pdev;
b6ab4b01 1008 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 1009 u16 prod = rxr->rx_agg_prod;
bfcd8d79 1010 bool p5_tpa = false;
c0c050c5
MC
1011 u32 i;
1012
bfcd8d79
MC
1013 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1014 p5_tpa = true;
1015
c0c050c5
MC
1016 for (i = 0; i < agg_bufs; i++) {
1017 u16 cons, frag_len;
1018 struct rx_agg_cmp *agg;
1019 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1020 struct page *page;
1021 dma_addr_t mapping;
1022
bfcd8d79
MC
1023 if (p5_tpa)
1024 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1025 else
1026 agg = bnxt_get_agg(bp, cpr, idx, i);
c0c050c5
MC
1027 cons = agg->rx_agg_cmp_opaque;
1028 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1029 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1030
1031 cons_rx_buf = &rxr->rx_agg_ring[cons];
89d0a06c
MC
1032 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1033 cons_rx_buf->offset, frag_len);
c0c050c5
MC
1034 __clear_bit(cons, rxr->rx_agg_bmap);
1035
1036 /* It is possible for bnxt_alloc_rx_page() to allocate
1037 * a sw_prod index that equals the cons index, so we
1038 * need to clear the cons entry now.
1039 */
11cd119d 1040 mapping = cons_rx_buf->mapping;
c0c050c5
MC
1041 page = cons_rx_buf->page;
1042 cons_rx_buf->page = NULL;
1043
1044 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1045 struct skb_shared_info *shinfo;
1046 unsigned int nr_frags;
1047
1048 shinfo = skb_shinfo(skb);
1049 nr_frags = --shinfo->nr_frags;
1050 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1051
1052 dev_kfree_skb(skb);
1053
1054 cons_rx_buf->page = page;
1055
1056 /* Update prod since possibly some pages have been
1057 * allocated already.
1058 */
1059 rxr->rx_agg_prod = prod;
4a228a3a 1060 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
c0c050c5
MC
1061 return NULL;
1062 }
1063
c519fe9a
SN
1064 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1065 PCI_DMA_FROMDEVICE,
1066 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1067
1068 skb->data_len += frag_len;
1069 skb->len += frag_len;
1070 skb->truesize += PAGE_SIZE;
1071
1072 prod = NEXT_RX_AGG(prod);
c0c050c5
MC
1073 }
1074 rxr->rx_agg_prod = prod;
1075 return skb;
1076}
1077
1078static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1079 u8 agg_bufs, u32 *raw_cons)
1080{
1081 u16 last;
1082 struct rx_agg_cmp *agg;
1083
1084 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1085 last = RING_CMP(*raw_cons);
1086 agg = (struct rx_agg_cmp *)
1087 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1088 return RX_AGG_CMP_VALID(agg, *raw_cons);
1089}
1090
1091static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1092 unsigned int len,
1093 dma_addr_t mapping)
1094{
1095 struct bnxt *bp = bnapi->bp;
1096 struct pci_dev *pdev = bp->pdev;
1097 struct sk_buff *skb;
1098
1099 skb = napi_alloc_skb(&bnapi->napi, len);
1100 if (!skb)
1101 return NULL;
1102
745fc05c
MC
1103 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1104 bp->rx_dir);
c0c050c5 1105
6bb19474
MC
1106 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1107 len + NET_IP_ALIGN);
c0c050c5 1108
745fc05c
MC
1109 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1110 bp->rx_dir);
c0c050c5
MC
1111
1112 skb_put(skb, len);
1113 return skb;
1114}
1115
e44758b7 1116static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
fa7e2812
MC
1117 u32 *raw_cons, void *cmp)
1118{
fa7e2812
MC
1119 struct rx_cmp *rxcmp = cmp;
1120 u32 tmp_raw_cons = *raw_cons;
1121 u8 cmp_type, agg_bufs = 0;
1122
1123 cmp_type = RX_CMP_TYPE(rxcmp);
1124
1125 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1126 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1127 RX_CMP_AGG_BUFS) >>
1128 RX_CMP_AGG_BUFS_SHIFT;
1129 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1130 struct rx_tpa_end_cmp *tpa_end = cmp;
1131
bfcd8d79
MC
1132 if (bp->flags & BNXT_FLAG_CHIP_P5)
1133 return 0;
1134
4a228a3a 1135 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
fa7e2812
MC
1136 }
1137
1138 if (agg_bufs) {
1139 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1140 return -EBUSY;
1141 }
1142 *raw_cons = tmp_raw_cons;
1143 return 0;
1144}
1145
230d1f0d
MC
1146static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1147{
1148 if (BNXT_PF(bp))
1149 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1150 else
1151 schedule_delayed_work(&bp->fw_reset_task, delay);
1152}
1153
c213eae8
MC
1154static void bnxt_queue_sp_work(struct bnxt *bp)
1155{
1156 if (BNXT_PF(bp))
1157 queue_work(bnxt_pf_wq, &bp->sp_task);
1158 else
1159 schedule_work(&bp->sp_task);
1160}
1161
1162static void bnxt_cancel_sp_work(struct bnxt *bp)
1163{
1164 if (BNXT_PF(bp))
1165 flush_workqueue(bnxt_pf_wq);
1166 else
1167 cancel_work_sync(&bp->sp_task);
1168}
1169
fa7e2812
MC
1170static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1171{
1172 if (!rxr->bnapi->in_reset) {
1173 rxr->bnapi->in_reset = true;
1174 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 1175 bnxt_queue_sp_work(bp);
fa7e2812
MC
1176 }
1177 rxr->rx_next_cons = 0xffff;
1178}
1179
ec4d8e7c
MC
1180static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1181{
1182 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1183 u16 idx = agg_id & MAX_TPA_P5_MASK;
1184
1185 if (test_bit(idx, map->agg_idx_bmap))
1186 idx = find_first_zero_bit(map->agg_idx_bmap,
1187 BNXT_AGG_IDX_BMAP_SIZE);
1188 __set_bit(idx, map->agg_idx_bmap);
1189 map->agg_id_tbl[agg_id] = idx;
1190 return idx;
1191}
1192
1193static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1194{
1195 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1196
1197 __clear_bit(idx, map->agg_idx_bmap);
1198}
1199
1200static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1201{
1202 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1203
1204 return map->agg_id_tbl[agg_id];
1205}
1206
c0c050c5
MC
1207static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1208 struct rx_tpa_start_cmp *tpa_start,
1209 struct rx_tpa_start_cmp_ext *tpa_start1)
1210{
c0c050c5 1211 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
bfcd8d79
MC
1212 struct bnxt_tpa_info *tpa_info;
1213 u16 cons, prod, agg_id;
c0c050c5
MC
1214 struct rx_bd *prod_bd;
1215 dma_addr_t mapping;
1216
ec4d8e7c 1217 if (bp->flags & BNXT_FLAG_CHIP_P5) {
bfcd8d79 1218 agg_id = TPA_START_AGG_ID_P5(tpa_start);
ec4d8e7c
MC
1219 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1220 } else {
bfcd8d79 1221 agg_id = TPA_START_AGG_ID(tpa_start);
ec4d8e7c 1222 }
c0c050c5
MC
1223 cons = tpa_start->rx_tpa_start_cmp_opaque;
1224 prod = rxr->rx_prod;
1225 cons_rx_buf = &rxr->rx_buf_ring[cons];
1226 prod_rx_buf = &rxr->rx_buf_ring[prod];
1227 tpa_info = &rxr->rx_tpa[agg_id];
1228
bfcd8d79
MC
1229 if (unlikely(cons != rxr->rx_next_cons ||
1230 TPA_START_ERROR(tpa_start))) {
1231 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1232 cons, rxr->rx_next_cons,
1233 TPA_START_ERROR_CODE(tpa_start1));
fa7e2812
MC
1234 bnxt_sched_reset(bp, rxr);
1235 return;
1236 }
ee5c7fb3
SP
1237 /* Store cfa_code in tpa_info to use in tpa_end
1238 * completion processing.
1239 */
1240 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
c0c050c5 1241 prod_rx_buf->data = tpa_info->data;
6bb19474 1242 prod_rx_buf->data_ptr = tpa_info->data_ptr;
c0c050c5
MC
1243
1244 mapping = tpa_info->mapping;
11cd119d 1245 prod_rx_buf->mapping = mapping;
c0c050c5
MC
1246
1247 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1248
1249 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1250
1251 tpa_info->data = cons_rx_buf->data;
6bb19474 1252 tpa_info->data_ptr = cons_rx_buf->data_ptr;
c0c050c5 1253 cons_rx_buf->data = NULL;
11cd119d 1254 tpa_info->mapping = cons_rx_buf->mapping;
c0c050c5
MC
1255
1256 tpa_info->len =
1257 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1258 RX_TPA_START_CMP_LEN_SHIFT;
1259 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1260 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1261
1262 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1263 tpa_info->gso_type = SKB_GSO_TCPV4;
1264 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
50f011b6 1265 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
c0c050c5
MC
1266 tpa_info->gso_type = SKB_GSO_TCPV6;
1267 tpa_info->rss_hash =
1268 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1269 } else {
1270 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1271 tpa_info->gso_type = 0;
1272 if (netif_msg_rx_err(bp))
1273 netdev_warn(bp->dev, "TPA packet without valid hash\n");
1274 }
1275 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1276 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
94758f8d 1277 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
bfcd8d79 1278 tpa_info->agg_count = 0;
c0c050c5
MC
1279
1280 rxr->rx_prod = NEXT_RX(prod);
1281 cons = NEXT_RX(cons);
376a5b86 1282 rxr->rx_next_cons = NEXT_RX(cons);
c0c050c5
MC
1283 cons_rx_buf = &rxr->rx_buf_ring[cons];
1284
1285 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1286 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1287 cons_rx_buf->data = NULL;
1288}
1289
4a228a3a 1290static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
c0c050c5
MC
1291{
1292 if (agg_bufs)
4a228a3a 1293 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
c0c050c5
MC
1294}
1295
bee5a188
MC
1296#ifdef CONFIG_INET
1297static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1298{
1299 struct udphdr *uh = NULL;
1300
1301 if (ip_proto == htons(ETH_P_IP)) {
1302 struct iphdr *iph = (struct iphdr *)skb->data;
1303
1304 if (iph->protocol == IPPROTO_UDP)
1305 uh = (struct udphdr *)(iph + 1);
1306 } else {
1307 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1308
1309 if (iph->nexthdr == IPPROTO_UDP)
1310 uh = (struct udphdr *)(iph + 1);
1311 }
1312 if (uh) {
1313 if (uh->check)
1314 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1315 else
1316 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1317 }
1318}
1319#endif
1320
94758f8d
MC
1321static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1322 int payload_off, int tcp_ts,
1323 struct sk_buff *skb)
1324{
1325#ifdef CONFIG_INET
1326 struct tcphdr *th;
1327 int len, nw_off;
1328 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1329 u32 hdr_info = tpa_info->hdr_info;
1330 bool loopback = false;
1331
1332 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1333 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1334 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1335
1336 /* If the packet is an internal loopback packet, the offsets will
1337 * have an extra 4 bytes.
1338 */
1339 if (inner_mac_off == 4) {
1340 loopback = true;
1341 } else if (inner_mac_off > 4) {
1342 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1343 ETH_HLEN - 2));
1344
1345 /* We only support inner iPv4/ipv6. If we don't see the
1346 * correct protocol ID, it must be a loopback packet where
1347 * the offsets are off by 4.
1348 */
09a7636a 1349 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
94758f8d
MC
1350 loopback = true;
1351 }
1352 if (loopback) {
1353 /* internal loopback packet, subtract all offsets by 4 */
1354 inner_ip_off -= 4;
1355 inner_mac_off -= 4;
1356 outer_ip_off -= 4;
1357 }
1358
1359 nw_off = inner_ip_off - ETH_HLEN;
1360 skb_set_network_header(skb, nw_off);
1361 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1362 struct ipv6hdr *iph = ipv6_hdr(skb);
1363
1364 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1365 len = skb->len - skb_transport_offset(skb);
1366 th = tcp_hdr(skb);
1367 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1368 } else {
1369 struct iphdr *iph = ip_hdr(skb);
1370
1371 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1372 len = skb->len - skb_transport_offset(skb);
1373 th = tcp_hdr(skb);
1374 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1375 }
1376
1377 if (inner_mac_off) { /* tunnel */
94758f8d
MC
1378 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1379 ETH_HLEN - 2));
1380
bee5a188 1381 bnxt_gro_tunnel(skb, proto);
94758f8d
MC
1382 }
1383#endif
1384 return skb;
1385}
1386
67912c36
MC
1387static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1388 int payload_off, int tcp_ts,
1389 struct sk_buff *skb)
1390{
1391#ifdef CONFIG_INET
1392 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1393 u32 hdr_info = tpa_info->hdr_info;
1394 int iphdr_len, nw_off;
1395
1396 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1397 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1398 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1399
1400 nw_off = inner_ip_off - ETH_HLEN;
1401 skb_set_network_header(skb, nw_off);
1402 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1403 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1404 skb_set_transport_header(skb, nw_off + iphdr_len);
1405
1406 if (inner_mac_off) { /* tunnel */
1407 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1408 ETH_HLEN - 2));
1409
1410 bnxt_gro_tunnel(skb, proto);
1411 }
1412#endif
1413 return skb;
1414}
1415
c0c050c5
MC
1416#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1417#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1418
309369c9
MC
1419static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1420 int payload_off, int tcp_ts,
c0c050c5
MC
1421 struct sk_buff *skb)
1422{
d1611c3a 1423#ifdef CONFIG_INET
c0c050c5 1424 struct tcphdr *th;
719ca811 1425 int len, nw_off, tcp_opt_len = 0;
27e24189 1426
309369c9 1427 if (tcp_ts)
c0c050c5
MC
1428 tcp_opt_len = 12;
1429
c0c050c5
MC
1430 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1431 struct iphdr *iph;
1432
1433 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1434 ETH_HLEN;
1435 skb_set_network_header(skb, nw_off);
1436 iph = ip_hdr(skb);
1437 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1438 len = skb->len - skb_transport_offset(skb);
1439 th = tcp_hdr(skb);
1440 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1441 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1442 struct ipv6hdr *iph;
1443
1444 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1445 ETH_HLEN;
1446 skb_set_network_header(skb, nw_off);
1447 iph = ipv6_hdr(skb);
1448 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1449 len = skb->len - skb_transport_offset(skb);
1450 th = tcp_hdr(skb);
1451 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1452 } else {
1453 dev_kfree_skb_any(skb);
1454 return NULL;
1455 }
c0c050c5 1456
bee5a188
MC
1457 if (nw_off) /* tunnel */
1458 bnxt_gro_tunnel(skb, skb->protocol);
c0c050c5
MC
1459#endif
1460 return skb;
1461}
1462
309369c9
MC
1463static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1464 struct bnxt_tpa_info *tpa_info,
1465 struct rx_tpa_end_cmp *tpa_end,
1466 struct rx_tpa_end_cmp_ext *tpa_end1,
1467 struct sk_buff *skb)
1468{
1469#ifdef CONFIG_INET
1470 int payload_off;
1471 u16 segs;
1472
1473 segs = TPA_END_TPA_SEGS(tpa_end);
1474 if (segs == 1)
1475 return skb;
1476
1477 NAPI_GRO_CB(skb)->count = segs;
1478 skb_shinfo(skb)->gso_size =
1479 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1480 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
bfcd8d79
MC
1481 if (bp->flags & BNXT_FLAG_CHIP_P5)
1482 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1483 else
1484 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
309369c9 1485 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
5910906c
MC
1486 if (likely(skb))
1487 tcp_gro_complete(skb);
309369c9
MC
1488#endif
1489 return skb;
1490}
1491
ee5c7fb3
SP
1492/* Given the cfa_code of a received packet determine which
1493 * netdev (vf-rep or PF) the packet is destined to.
1494 */
1495static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1496{
1497 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1498
1499 /* if vf-rep dev is NULL, the must belongs to the PF */
1500 return dev ? dev : bp->dev;
1501}
1502
c0c050c5 1503static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
e44758b7 1504 struct bnxt_cp_ring_info *cpr,
c0c050c5
MC
1505 u32 *raw_cons,
1506 struct rx_tpa_end_cmp *tpa_end,
1507 struct rx_tpa_end_cmp_ext *tpa_end1,
4e5dbbda 1508 u8 *event)
c0c050c5 1509{
e44758b7 1510 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1511 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6bb19474 1512 u8 *data_ptr, agg_bufs;
c0c050c5
MC
1513 unsigned int len;
1514 struct bnxt_tpa_info *tpa_info;
1515 dma_addr_t mapping;
1516 struct sk_buff *skb;
bfcd8d79 1517 u16 idx = 0, agg_id;
6bb19474 1518 void *data;
bfcd8d79 1519 bool gro;
c0c050c5 1520
fa7e2812 1521 if (unlikely(bnapi->in_reset)) {
e44758b7 1522 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
fa7e2812
MC
1523
1524 if (rc < 0)
1525 return ERR_PTR(-EBUSY);
1526 return NULL;
1527 }
1528
bfcd8d79
MC
1529 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1530 agg_id = TPA_END_AGG_ID_P5(tpa_end);
ec4d8e7c 1531 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
bfcd8d79
MC
1532 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1533 tpa_info = &rxr->rx_tpa[agg_id];
1534 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1535 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1536 agg_bufs, tpa_info->agg_count);
1537 agg_bufs = tpa_info->agg_count;
1538 }
1539 tpa_info->agg_count = 0;
1540 *event |= BNXT_AGG_EVENT;
ec4d8e7c 1541 bnxt_free_agg_idx(rxr, agg_id);
bfcd8d79
MC
1542 idx = agg_id;
1543 gro = !!(bp->flags & BNXT_FLAG_GRO);
1544 } else {
1545 agg_id = TPA_END_AGG_ID(tpa_end);
1546 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1547 tpa_info = &rxr->rx_tpa[agg_id];
1548 idx = RING_CMP(*raw_cons);
1549 if (agg_bufs) {
1550 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1551 return ERR_PTR(-EBUSY);
1552
1553 *event |= BNXT_AGG_EVENT;
1554 idx = NEXT_CMP(idx);
1555 }
1556 gro = !!TPA_END_GRO(tpa_end);
1557 }
c0c050c5 1558 data = tpa_info->data;
6bb19474
MC
1559 data_ptr = tpa_info->data_ptr;
1560 prefetch(data_ptr);
c0c050c5
MC
1561 len = tpa_info->len;
1562 mapping = tpa_info->mapping;
1563
69c149e2 1564 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
4a228a3a 1565 bnxt_abort_tpa(cpr, idx, agg_bufs);
69c149e2
MC
1566 if (agg_bufs > MAX_SKB_FRAGS)
1567 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1568 agg_bufs, (int)MAX_SKB_FRAGS);
c0c050c5
MC
1569 return NULL;
1570 }
1571
1572 if (len <= bp->rx_copy_thresh) {
6bb19474 1573 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
c0c050c5 1574 if (!skb) {
4a228a3a 1575 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1576 return NULL;
1577 }
1578 } else {
1579 u8 *new_data;
1580 dma_addr_t new_mapping;
1581
1582 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1583 if (!new_data) {
4a228a3a 1584 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1585 return NULL;
1586 }
1587
1588 tpa_info->data = new_data;
b3dba77c 1589 tpa_info->data_ptr = new_data + bp->rx_offset;
c0c050c5
MC
1590 tpa_info->mapping = new_mapping;
1591
1592 skb = build_skb(data, 0);
c519fe9a
SN
1593 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1594 bp->rx_buf_use_size, bp->rx_dir,
1595 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1596
1597 if (!skb) {
1598 kfree(data);
4a228a3a 1599 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1600 return NULL;
1601 }
b3dba77c 1602 skb_reserve(skb, bp->rx_offset);
c0c050c5
MC
1603 skb_put(skb, len);
1604 }
1605
1606 if (agg_bufs) {
4a228a3a 1607 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
c0c050c5
MC
1608 if (!skb) {
1609 /* Page reuse already handled by bnxt_rx_pages(). */
1610 return NULL;
1611 }
1612 }
ee5c7fb3
SP
1613
1614 skb->protocol =
1615 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
c0c050c5
MC
1616
1617 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1618 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1619
8852ddb4
MC
1620 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1621 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
c0c050c5
MC
1622 u16 vlan_proto = tpa_info->metadata >>
1623 RX_CMP_FLAGS2_METADATA_TPID_SFT;
ed7bc602 1624 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
c0c050c5 1625
8852ddb4 1626 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
c0c050c5
MC
1627 }
1628
1629 skb_checksum_none_assert(skb);
1630 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1631 skb->ip_summed = CHECKSUM_UNNECESSARY;
1632 skb->csum_level =
1633 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1634 }
1635
bfcd8d79 1636 if (gro)
309369c9 1637 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
c0c050c5
MC
1638
1639 return skb;
1640}
1641
8fe88ce7
MC
1642static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1643 struct rx_agg_cmp *rx_agg)
1644{
1645 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1646 struct bnxt_tpa_info *tpa_info;
1647
ec4d8e7c 1648 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
8fe88ce7
MC
1649 tpa_info = &rxr->rx_tpa[agg_id];
1650 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1651 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1652}
1653
ee5c7fb3
SP
1654static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1655 struct sk_buff *skb)
1656{
1657 if (skb->dev != bp->dev) {
1658 /* this packet belongs to a vf-rep */
1659 bnxt_vf_rep_rx(bp, skb);
1660 return;
1661 }
1662 skb_record_rx_queue(skb, bnapi->index);
1663 napi_gro_receive(&bnapi->napi, skb);
1664}
1665
c0c050c5
MC
1666/* returns the following:
1667 * 1 - 1 packet successfully received
1668 * 0 - successful TPA_START, packet not completed yet
1669 * -EBUSY - completion ring does not have all the agg buffers yet
1670 * -ENOMEM - packet aborted due to out of memory
1671 * -EIO - packet aborted due to hw error indicated in BD
1672 */
e44758b7
MC
1673static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1674 u32 *raw_cons, u8 *event)
c0c050c5 1675{
e44758b7 1676 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1677 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1678 struct net_device *dev = bp->dev;
1679 struct rx_cmp *rxcmp;
1680 struct rx_cmp_ext *rxcmp1;
1681 u32 tmp_raw_cons = *raw_cons;
ee5c7fb3 1682 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
c0c050c5
MC
1683 struct bnxt_sw_rx_bd *rx_buf;
1684 unsigned int len;
6bb19474 1685 u8 *data_ptr, agg_bufs, cmp_type;
c0c050c5
MC
1686 dma_addr_t dma_addr;
1687 struct sk_buff *skb;
6bb19474 1688 void *data;
c0c050c5 1689 int rc = 0;
c61fb99c 1690 u32 misc;
c0c050c5
MC
1691
1692 rxcmp = (struct rx_cmp *)
1693 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1694
8fe88ce7
MC
1695 cmp_type = RX_CMP_TYPE(rxcmp);
1696
1697 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1698 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1699 goto next_rx_no_prod_no_len;
1700 }
1701
c0c050c5
MC
1702 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1703 cp_cons = RING_CMP(tmp_raw_cons);
1704 rxcmp1 = (struct rx_cmp_ext *)
1705 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1706
1707 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1708 return -EBUSY;
1709
c0c050c5
MC
1710 prod = rxr->rx_prod;
1711
1712 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1713 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1714 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1715
4e5dbbda 1716 *event |= BNXT_RX_EVENT;
e7e70fa6 1717 goto next_rx_no_prod_no_len;
c0c050c5
MC
1718
1719 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
e44758b7 1720 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
c0c050c5 1721 (struct rx_tpa_end_cmp *)rxcmp,
4e5dbbda 1722 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
c0c050c5 1723
1fac4b2f 1724 if (IS_ERR(skb))
c0c050c5
MC
1725 return -EBUSY;
1726
1727 rc = -ENOMEM;
1728 if (likely(skb)) {
ee5c7fb3 1729 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1730 rc = 1;
1731 }
4e5dbbda 1732 *event |= BNXT_RX_EVENT;
e7e70fa6 1733 goto next_rx_no_prod_no_len;
c0c050c5
MC
1734 }
1735
1736 cons = rxcmp->rx_cmp_opaque;
fa7e2812 1737 if (unlikely(cons != rxr->rx_next_cons)) {
e44758b7 1738 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
fa7e2812 1739
a1b0e4e6
MC
1740 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1741 cons, rxr->rx_next_cons);
fa7e2812
MC
1742 bnxt_sched_reset(bp, rxr);
1743 return rc1;
1744 }
a1b0e4e6
MC
1745 rx_buf = &rxr->rx_buf_ring[cons];
1746 data = rx_buf->data;
1747 data_ptr = rx_buf->data_ptr;
6bb19474 1748 prefetch(data_ptr);
c0c050c5 1749
c61fb99c
MC
1750 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1751 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
c0c050c5
MC
1752
1753 if (agg_bufs) {
1754 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1755 return -EBUSY;
1756
1757 cp_cons = NEXT_CMP(cp_cons);
4e5dbbda 1758 *event |= BNXT_AGG_EVENT;
c0c050c5 1759 }
4e5dbbda 1760 *event |= BNXT_RX_EVENT;
c0c050c5
MC
1761
1762 rx_buf->data = NULL;
1763 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
8e44e96c
MC
1764 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1765
c0c050c5
MC
1766 bnxt_reuse_rx_data(rxr, cons, data);
1767 if (agg_bufs)
4a228a3a
MC
1768 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1769 false);
c0c050c5
MC
1770
1771 rc = -EIO;
8e44e96c 1772 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
19b3751f
MC
1773 bnapi->cp_ring.rx_buf_errors++;
1774 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
1775 netdev_warn(bp->dev, "RX buffer error %x\n",
1776 rx_err);
1777 bnxt_sched_reset(bp, rxr);
1778 }
8e44e96c 1779 }
0b397b17 1780 goto next_rx_no_len;
c0c050c5
MC
1781 }
1782
1783 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
11cd119d 1784 dma_addr = rx_buf->mapping;
c0c050c5 1785
c6d30e83
MC
1786 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1787 rc = 1;
1788 goto next_rx;
1789 }
1790
c0c050c5 1791 if (len <= bp->rx_copy_thresh) {
6bb19474 1792 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
c0c050c5
MC
1793 bnxt_reuse_rx_data(rxr, cons, data);
1794 if (!skb) {
296d5b54 1795 if (agg_bufs)
4a228a3a
MC
1796 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1797 agg_bufs, false);
c0c050c5
MC
1798 rc = -ENOMEM;
1799 goto next_rx;
1800 }
1801 } else {
c61fb99c
MC
1802 u32 payload;
1803
c6d30e83
MC
1804 if (rx_buf->data_ptr == data_ptr)
1805 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1806 else
1807 payload = 0;
6bb19474 1808 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
c61fb99c 1809 payload | len);
c0c050c5
MC
1810 if (!skb) {
1811 rc = -ENOMEM;
1812 goto next_rx;
1813 }
1814 }
1815
1816 if (agg_bufs) {
4a228a3a 1817 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
c0c050c5
MC
1818 if (!skb) {
1819 rc = -ENOMEM;
1820 goto next_rx;
1821 }
1822 }
1823
1824 if (RX_CMP_HASH_VALID(rxcmp)) {
1825 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1826 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1827
1828 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1829 if (hash_type != 1 && hash_type != 3)
1830 type = PKT_HASH_TYPE_L3;
1831 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1832 }
1833
ee5c7fb3
SP
1834 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1835 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
c0c050c5 1836
8852ddb4
MC
1837 if ((rxcmp1->rx_cmp_flags2 &
1838 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1839 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
c0c050c5 1840 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
ed7bc602 1841 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
c0c050c5
MC
1842 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1843
8852ddb4 1844 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
c0c050c5
MC
1845 }
1846
1847 skb_checksum_none_assert(skb);
1848 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1849 if (dev->features & NETIF_F_RXCSUM) {
1850 skb->ip_summed = CHECKSUM_UNNECESSARY;
1851 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1852 }
1853 } else {
665e350d
SB
1854 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1855 if (dev->features & NETIF_F_RXCSUM)
d1981929 1856 bnapi->cp_ring.rx_l4_csum_errors++;
665e350d 1857 }
c0c050c5
MC
1858 }
1859
ee5c7fb3 1860 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1861 rc = 1;
1862
1863next_rx:
6a8788f2
AG
1864 cpr->rx_packets += 1;
1865 cpr->rx_bytes += len;
e7e70fa6 1866
0b397b17
MC
1867next_rx_no_len:
1868 rxr->rx_prod = NEXT_RX(prod);
1869 rxr->rx_next_cons = NEXT_RX(cons);
1870
e7e70fa6 1871next_rx_no_prod_no_len:
c0c050c5
MC
1872 *raw_cons = tmp_raw_cons;
1873
1874 return rc;
1875}
1876
2270bc5d
MC
1877/* In netpoll mode, if we are using a combined completion ring, we need to
1878 * discard the rx packets and recycle the buffers.
1879 */
e44758b7
MC
1880static int bnxt_force_rx_discard(struct bnxt *bp,
1881 struct bnxt_cp_ring_info *cpr,
2270bc5d
MC
1882 u32 *raw_cons, u8 *event)
1883{
2270bc5d
MC
1884 u32 tmp_raw_cons = *raw_cons;
1885 struct rx_cmp_ext *rxcmp1;
1886 struct rx_cmp *rxcmp;
1887 u16 cp_cons;
1888 u8 cmp_type;
1889
1890 cp_cons = RING_CMP(tmp_raw_cons);
1891 rxcmp = (struct rx_cmp *)
1892 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1893
1894 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1895 cp_cons = RING_CMP(tmp_raw_cons);
1896 rxcmp1 = (struct rx_cmp_ext *)
1897 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1898
1899 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1900 return -EBUSY;
1901
1902 cmp_type = RX_CMP_TYPE(rxcmp);
1903 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1904 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1905 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1906 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1907 struct rx_tpa_end_cmp_ext *tpa_end1;
1908
1909 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1910 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1911 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1912 }
e44758b7 1913 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
2270bc5d
MC
1914}
1915
7e914027
MC
1916u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1917{
1918 struct bnxt_fw_health *fw_health = bp->fw_health;
1919 u32 reg = fw_health->regs[reg_idx];
1920 u32 reg_type, reg_off, val = 0;
1921
1922 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
1923 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
1924 switch (reg_type) {
1925 case BNXT_FW_HEALTH_REG_TYPE_CFG:
1926 pci_read_config_dword(bp->pdev, reg_off, &val);
1927 break;
1928 case BNXT_FW_HEALTH_REG_TYPE_GRC:
1929 reg_off = fw_health->mapped_regs[reg_idx];
1930 /* fall through */
1931 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
1932 val = readl(bp->bar0 + reg_off);
1933 break;
1934 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
1935 val = readl(bp->bar1 + reg_off);
1936 break;
1937 }
1938 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
1939 val &= fw_health->fw_reset_inprog_reg_mask;
1940 return val;
1941}
1942
4bb13abf 1943#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
1944 ((data) & \
1945 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 1946
c0c050c5
MC
1947static int bnxt_async_event_process(struct bnxt *bp,
1948 struct hwrm_async_event_cmpl *cmpl)
1949{
1950 u16 event_id = le16_to_cpu(cmpl->event_id);
1951
1952 /* TODO CHIMP_FW: Define event id's for link change, error etc */
1953 switch (event_id) {
87c374de 1954 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
8cbde117
MC
1955 u32 data1 = le32_to_cpu(cmpl->event_data1);
1956 struct bnxt_link_info *link_info = &bp->link_info;
1957
1958 if (BNXT_VF(bp))
1959 goto async_event_process_exit;
a8168b6c
MC
1960
1961 /* print unsupported speed warning in forced speed mode only */
1962 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1963 (data1 & 0x20000)) {
8cbde117
MC
1964 u16 fw_speed = link_info->force_link_speed;
1965 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1966
a8168b6c
MC
1967 if (speed != SPEED_UNKNOWN)
1968 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1969 speed);
8cbde117 1970 }
286ef9d6 1971 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
8cbde117 1972 }
bc171e87 1973 /* fall through */
b1613e78
MC
1974 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
1975 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
1976 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
1977 /* fall through */
87c374de 1978 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
c0c050c5 1979 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
19241368 1980 break;
87c374de 1981 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
19241368 1982 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
c0c050c5 1983 break;
87c374de 1984 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
4bb13abf
MC
1985 u32 data1 = le32_to_cpu(cmpl->event_data1);
1986 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1987
1988 if (BNXT_VF(bp))
1989 break;
1990
1991 if (bp->pf.port_id != port_id)
1992 break;
1993
4bb13abf
MC
1994 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1995 break;
1996 }
87c374de 1997 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
fc0f1929
MC
1998 if (BNXT_PF(bp))
1999 goto async_event_process_exit;
2000 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2001 break;
acfb50e4
VV
2002 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2003 u32 data1 = le32_to_cpu(cmpl->event_data1);
2004
8280b38e
VV
2005 if (!bp->fw_health)
2006 goto async_event_process_exit;
2007
2151fe08
MC
2008 bp->fw_reset_timestamp = jiffies;
2009 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2010 if (!bp->fw_reset_min_dsecs)
2011 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2012 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2013 if (!bp->fw_reset_max_dsecs)
2014 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
acfb50e4
VV
2015 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2016 netdev_warn(bp->dev, "Firmware fatal reset event received\n");
2017 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2018 } else {
2019 netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
2020 bp->fw_reset_max_dsecs * 100);
2021 }
2151fe08
MC
2022 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2023 break;
acfb50e4 2024 }
7e914027
MC
2025 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2026 struct bnxt_fw_health *fw_health = bp->fw_health;
2027 u32 data1 = le32_to_cpu(cmpl->event_data1);
2028
2029 if (!fw_health)
2030 goto async_event_process_exit;
2031
2032 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2033 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2034 if (!fw_health->enabled)
2035 break;
2036
2037 if (netif_msg_drv(bp))
2038 netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
2039 fw_health->enabled, fw_health->master,
2040 bnxt_fw_health_readl(bp,
2041 BNXT_FW_RESET_CNT_REG),
2042 bnxt_fw_health_readl(bp,
2043 BNXT_FW_HEALTH_REG));
2044 fw_health->tmr_multiplier =
2045 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2046 bp->current_interval * 10);
2047 fw_health->tmr_counter = fw_health->tmr_multiplier;
2048 fw_health->last_fw_heartbeat =
2049 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2050 fw_health->last_fw_reset_cnt =
2051 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2052 goto async_event_process_exit;
2053 }
c0c050c5 2054 default:
19241368 2055 goto async_event_process_exit;
c0c050c5 2056 }
c213eae8 2057 bnxt_queue_sp_work(bp);
19241368 2058async_event_process_exit:
a588e458 2059 bnxt_ulp_async_events(bp, cmpl);
c0c050c5
MC
2060 return 0;
2061}
2062
2063static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2064{
2065 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2066 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2067 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2068 (struct hwrm_fwd_req_cmpl *)txcmp;
2069
2070 switch (cmpl_type) {
2071 case CMPL_BASE_TYPE_HWRM_DONE:
2072 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2073 if (seq_id == bp->hwrm_intr_seq_id)
fc718bb2 2074 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
c0c050c5
MC
2075 else
2076 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2077 break;
2078
2079 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2080 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2081
2082 if ((vf_id < bp->pf.first_vf_id) ||
2083 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2084 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2085 vf_id);
2086 return -EINVAL;
2087 }
2088
2089 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2090 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
c213eae8 2091 bnxt_queue_sp_work(bp);
c0c050c5
MC
2092 break;
2093
2094 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2095 bnxt_async_event_process(bp,
2096 (struct hwrm_async_event_cmpl *)txcmp);
2097
2098 default:
2099 break;
2100 }
2101
2102 return 0;
2103}
2104
2105static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2106{
2107 struct bnxt_napi *bnapi = dev_instance;
2108 struct bnxt *bp = bnapi->bp;
2109 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2110 u32 cons = RING_CMP(cpr->cp_raw_cons);
2111
6a8788f2 2112 cpr->event_ctr++;
c0c050c5
MC
2113 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2114 napi_schedule(&bnapi->napi);
2115 return IRQ_HANDLED;
2116}
2117
2118static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2119{
2120 u32 raw_cons = cpr->cp_raw_cons;
2121 u16 cons = RING_CMP(raw_cons);
2122 struct tx_cmp *txcmp;
2123
2124 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2125
2126 return TX_CMP_VALID(txcmp, raw_cons);
2127}
2128
c0c050c5
MC
2129static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2130{
2131 struct bnxt_napi *bnapi = dev_instance;
2132 struct bnxt *bp = bnapi->bp;
2133 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2134 u32 cons = RING_CMP(cpr->cp_raw_cons);
2135 u32 int_status;
2136
2137 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2138
2139 if (!bnxt_has_work(bp, cpr)) {
11809490 2140 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
c0c050c5
MC
2141 /* return if erroneous interrupt */
2142 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2143 return IRQ_NONE;
2144 }
2145
2146 /* disable ring IRQ */
697197e5 2147 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
c0c050c5
MC
2148
2149 /* Return here if interrupt is shared and is disabled. */
2150 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2151 return IRQ_HANDLED;
2152
2153 napi_schedule(&bnapi->napi);
2154 return IRQ_HANDLED;
2155}
2156
3675b92f
MC
2157static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2158 int budget)
c0c050c5 2159{
e44758b7 2160 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5
MC
2161 u32 raw_cons = cpr->cp_raw_cons;
2162 u32 cons;
2163 int tx_pkts = 0;
2164 int rx_pkts = 0;
4e5dbbda 2165 u8 event = 0;
c0c050c5
MC
2166 struct tx_cmp *txcmp;
2167
0fcec985 2168 cpr->has_more_work = 0;
c0c050c5
MC
2169 while (1) {
2170 int rc;
2171
2172 cons = RING_CMP(raw_cons);
2173 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2174
2175 if (!TX_CMP_VALID(txcmp, raw_cons))
2176 break;
2177
67a95e20
MC
2178 /* The valid test of the entry must be done first before
2179 * reading any further.
2180 */
b67daab0 2181 dma_rmb();
3675b92f 2182 cpr->had_work_done = 1;
c0c050c5
MC
2183 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2184 tx_pkts++;
2185 /* return full budget so NAPI will complete. */
73f21c65 2186 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
c0c050c5 2187 rx_pkts = budget;
73f21c65 2188 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985
MC
2189 if (budget)
2190 cpr->has_more_work = 1;
73f21c65
MC
2191 break;
2192 }
c0c050c5 2193 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2270bc5d 2194 if (likely(budget))
e44758b7 2195 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2270bc5d 2196 else
e44758b7 2197 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2270bc5d 2198 &event);
c0c050c5
MC
2199 if (likely(rc >= 0))
2200 rx_pkts += rc;
903649e7
MC
2201 /* Increment rx_pkts when rc is -ENOMEM to count towards
2202 * the NAPI budget. Otherwise, we may potentially loop
2203 * here forever if we consistently cannot allocate
2204 * buffers.
2205 */
2edbdb31 2206 else if (rc == -ENOMEM && budget)
903649e7 2207 rx_pkts++;
c0c050c5
MC
2208 else if (rc == -EBUSY) /* partial completion */
2209 break;
c0c050c5
MC
2210 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2211 CMPL_BASE_TYPE_HWRM_DONE) ||
2212 (TX_CMP_TYPE(txcmp) ==
2213 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2214 (TX_CMP_TYPE(txcmp) ==
2215 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2216 bnxt_hwrm_handler(bp, txcmp);
2217 }
2218 raw_cons = NEXT_RAW_CMP(raw_cons);
2219
0fcec985
MC
2220 if (rx_pkts && rx_pkts == budget) {
2221 cpr->has_more_work = 1;
c0c050c5 2222 break;
0fcec985 2223 }
c0c050c5
MC
2224 }
2225
f18c2b77
AG
2226 if (event & BNXT_REDIRECT_EVENT)
2227 xdp_do_flush_map();
2228
38413406
MC
2229 if (event & BNXT_TX_EVENT) {
2230 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
38413406
MC
2231 u16 prod = txr->tx_prod;
2232
2233 /* Sync BD data before updating doorbell */
2234 wmb();
2235
697197e5 2236 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
38413406
MC
2237 }
2238
c0c050c5 2239 cpr->cp_raw_cons = raw_cons;
3675b92f
MC
2240 bnapi->tx_pkts += tx_pkts;
2241 bnapi->events |= event;
2242 return rx_pkts;
2243}
c0c050c5 2244
3675b92f
MC
2245static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2246{
2247 if (bnapi->tx_pkts) {
2248 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2249 bnapi->tx_pkts = 0;
2250 }
c0c050c5 2251
3675b92f 2252 if (bnapi->events & BNXT_RX_EVENT) {
b6ab4b01 2253 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 2254
3675b92f 2255 if (bnapi->events & BNXT_AGG_EVENT)
697197e5 2256 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 2257 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
c0c050c5 2258 }
3675b92f
MC
2259 bnapi->events = 0;
2260}
2261
2262static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2263 int budget)
2264{
2265 struct bnxt_napi *bnapi = cpr->bnapi;
2266 int rx_pkts;
2267
2268 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2269
2270 /* ACK completion ring before freeing tx ring and producing new
2271 * buffers in rx/agg rings to prevent overflowing the completion
2272 * ring.
2273 */
2274 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2275
2276 __bnxt_poll_work_done(bp, bnapi);
c0c050c5
MC
2277 return rx_pkts;
2278}
2279
10bbdaf5
PS
2280static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2281{
2282 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2283 struct bnxt *bp = bnapi->bp;
2284 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2285 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2286 struct tx_cmp *txcmp;
2287 struct rx_cmp_ext *rxcmp1;
2288 u32 cp_cons, tmp_raw_cons;
2289 u32 raw_cons = cpr->cp_raw_cons;
2290 u32 rx_pkts = 0;
4e5dbbda 2291 u8 event = 0;
10bbdaf5
PS
2292
2293 while (1) {
2294 int rc;
2295
2296 cp_cons = RING_CMP(raw_cons);
2297 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2298
2299 if (!TX_CMP_VALID(txcmp, raw_cons))
2300 break;
2301
2302 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2303 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2304 cp_cons = RING_CMP(tmp_raw_cons);
2305 rxcmp1 = (struct rx_cmp_ext *)
2306 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2307
2308 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2309 break;
2310
2311 /* force an error to recycle the buffer */
2312 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2313 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2314
e44758b7 2315 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2edbdb31 2316 if (likely(rc == -EIO) && budget)
10bbdaf5
PS
2317 rx_pkts++;
2318 else if (rc == -EBUSY) /* partial completion */
2319 break;
2320 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2321 CMPL_BASE_TYPE_HWRM_DONE)) {
2322 bnxt_hwrm_handler(bp, txcmp);
2323 } else {
2324 netdev_err(bp->dev,
2325 "Invalid completion received on special ring\n");
2326 }
2327 raw_cons = NEXT_RAW_CMP(raw_cons);
2328
2329 if (rx_pkts == budget)
2330 break;
2331 }
2332
2333 cpr->cp_raw_cons = raw_cons;
697197e5
MC
2334 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2335 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10bbdaf5 2336
434c975a 2337 if (event & BNXT_AGG_EVENT)
697197e5 2338 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10bbdaf5
PS
2339
2340 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
6ad20165 2341 napi_complete_done(napi, rx_pkts);
697197e5 2342 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
10bbdaf5
PS
2343 }
2344 return rx_pkts;
2345}
2346
c0c050c5
MC
2347static int bnxt_poll(struct napi_struct *napi, int budget)
2348{
2349 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2350 struct bnxt *bp = bnapi->bp;
2351 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2352 int work_done = 0;
2353
c0c050c5 2354 while (1) {
e44758b7 2355 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
c0c050c5 2356
73f21c65
MC
2357 if (work_done >= budget) {
2358 if (!budget)
697197e5 2359 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5 2360 break;
73f21c65 2361 }
c0c050c5
MC
2362
2363 if (!bnxt_has_work(bp, cpr)) {
e7b95691 2364 if (napi_complete_done(napi, work_done))
697197e5 2365 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5
MC
2366 break;
2367 }
2368 }
6a8788f2 2369 if (bp->flags & BNXT_FLAG_DIM) {
f06d0ca4 2370 struct dim_sample dim_sample = {};
6a8788f2 2371
8960b389
TG
2372 dim_update_sample(cpr->event_ctr,
2373 cpr->rx_packets,
2374 cpr->rx_bytes,
2375 &dim_sample);
6a8788f2
AG
2376 net_dim(&cpr->dim, dim_sample);
2377 }
c0c050c5
MC
2378 return work_done;
2379}
2380
0fcec985
MC
2381static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2382{
2383 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2384 int i, work_done = 0;
2385
2386 for (i = 0; i < 2; i++) {
2387 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2388
2389 if (cpr2) {
2390 work_done += __bnxt_poll_work(bp, cpr2,
2391 budget - work_done);
2392 cpr->has_more_work |= cpr2->has_more_work;
2393 }
2394 }
2395 return work_done;
2396}
2397
2398static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2399 u64 dbr_type, bool all)
2400{
2401 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2402 int i;
2403
2404 for (i = 0; i < 2; i++) {
2405 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2406 struct bnxt_db_info *db;
2407
2408 if (cpr2 && (all || cpr2->had_work_done)) {
2409 db = &cpr2->cp_db;
2410 writeq(db->db_key64 | dbr_type |
2411 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2412 cpr2->had_work_done = 0;
2413 }
2414 }
2415 __bnxt_poll_work_done(bp, bnapi);
2416}
2417
2418static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2419{
2420 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2421 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2422 u32 raw_cons = cpr->cp_raw_cons;
2423 struct bnxt *bp = bnapi->bp;
2424 struct nqe_cn *nqcmp;
2425 int work_done = 0;
2426 u32 cons;
2427
2428 if (cpr->has_more_work) {
2429 cpr->has_more_work = 0;
2430 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2431 if (cpr->has_more_work) {
2432 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2433 return work_done;
2434 }
2435 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2436 if (napi_complete_done(napi, work_done))
2437 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2438 return work_done;
2439 }
2440 while (1) {
2441 cons = RING_CMP(raw_cons);
2442 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2443
2444 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2445 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2446 false);
2447 cpr->cp_raw_cons = raw_cons;
2448 if (napi_complete_done(napi, work_done))
2449 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2450 cpr->cp_raw_cons);
2451 return work_done;
2452 }
2453
2454 /* The valid test of the entry must be done first before
2455 * reading any further.
2456 */
2457 dma_rmb();
2458
2459 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2460 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2461 struct bnxt_cp_ring_info *cpr2;
2462
2463 cpr2 = cpr->cp_ring_arr[idx];
2464 work_done += __bnxt_poll_work(bp, cpr2,
2465 budget - work_done);
2466 cpr->has_more_work = cpr2->has_more_work;
2467 } else {
2468 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2469 }
2470 raw_cons = NEXT_RAW_CMP(raw_cons);
2471 if (cpr->has_more_work)
2472 break;
2473 }
2474 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2475 cpr->cp_raw_cons = raw_cons;
2476 return work_done;
2477}
2478
c0c050c5
MC
2479static void bnxt_free_tx_skbs(struct bnxt *bp)
2480{
2481 int i, max_idx;
2482 struct pci_dev *pdev = bp->pdev;
2483
b6ab4b01 2484 if (!bp->tx_ring)
c0c050c5
MC
2485 return;
2486
2487 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2488 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2489 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2490 int j;
2491
c0c050c5
MC
2492 for (j = 0; j < max_idx;) {
2493 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
f18c2b77 2494 struct sk_buff *skb;
c0c050c5
MC
2495 int k, last;
2496
f18c2b77
AG
2497 if (i < bp->tx_nr_rings_xdp &&
2498 tx_buf->action == XDP_REDIRECT) {
2499 dma_unmap_single(&pdev->dev,
2500 dma_unmap_addr(tx_buf, mapping),
2501 dma_unmap_len(tx_buf, len),
2502 PCI_DMA_TODEVICE);
2503 xdp_return_frame(tx_buf->xdpf);
2504 tx_buf->action = 0;
2505 tx_buf->xdpf = NULL;
2506 j++;
2507 continue;
2508 }
2509
2510 skb = tx_buf->skb;
c0c050c5
MC
2511 if (!skb) {
2512 j++;
2513 continue;
2514 }
2515
2516 tx_buf->skb = NULL;
2517
2518 if (tx_buf->is_push) {
2519 dev_kfree_skb(skb);
2520 j += 2;
2521 continue;
2522 }
2523
2524 dma_unmap_single(&pdev->dev,
2525 dma_unmap_addr(tx_buf, mapping),
2526 skb_headlen(skb),
2527 PCI_DMA_TODEVICE);
2528
2529 last = tx_buf->nr_frags;
2530 j += 2;
d612a579
MC
2531 for (k = 0; k < last; k++, j++) {
2532 int ring_idx = j & bp->tx_ring_mask;
c0c050c5
MC
2533 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2534
d612a579 2535 tx_buf = &txr->tx_buf_ring[ring_idx];
c0c050c5
MC
2536 dma_unmap_page(
2537 &pdev->dev,
2538 dma_unmap_addr(tx_buf, mapping),
2539 skb_frag_size(frag), PCI_DMA_TODEVICE);
2540 }
2541 dev_kfree_skb(skb);
2542 }
2543 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2544 }
2545}
2546
2547static void bnxt_free_rx_skbs(struct bnxt *bp)
2548{
2549 int i, max_idx, max_agg_idx;
2550 struct pci_dev *pdev = bp->pdev;
2551
b6ab4b01 2552 if (!bp->rx_ring)
c0c050c5
MC
2553 return;
2554
2555 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2556 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2557 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2558 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
ec4d8e7c 2559 struct bnxt_tpa_idx_map *map;
c0c050c5
MC
2560 int j;
2561
c0c050c5 2562 if (rxr->rx_tpa) {
79632e9b 2563 for (j = 0; j < bp->max_tpa; j++) {
c0c050c5
MC
2564 struct bnxt_tpa_info *tpa_info =
2565 &rxr->rx_tpa[j];
2566 u8 *data = tpa_info->data;
2567
2568 if (!data)
2569 continue;
2570
c519fe9a
SN
2571 dma_unmap_single_attrs(&pdev->dev,
2572 tpa_info->mapping,
2573 bp->rx_buf_use_size,
2574 bp->rx_dir,
2575 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
2576
2577 tpa_info->data = NULL;
2578
2579 kfree(data);
2580 }
2581 }
2582
2583 for (j = 0; j < max_idx; j++) {
2584 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
3ed3a83e 2585 dma_addr_t mapping = rx_buf->mapping;
6bb19474 2586 void *data = rx_buf->data;
c0c050c5
MC
2587
2588 if (!data)
2589 continue;
2590
c0c050c5
MC
2591 rx_buf->data = NULL;
2592
3ed3a83e
MC
2593 if (BNXT_RX_PAGE_MODE(bp)) {
2594 mapping -= bp->rx_dma_offset;
c519fe9a
SN
2595 dma_unmap_page_attrs(&pdev->dev, mapping,
2596 PAGE_SIZE, bp->rx_dir,
2597 DMA_ATTR_WEAK_ORDERING);
322b87ca 2598 page_pool_recycle_direct(rxr->page_pool, data);
3ed3a83e 2599 } else {
c519fe9a
SN
2600 dma_unmap_single_attrs(&pdev->dev, mapping,
2601 bp->rx_buf_use_size,
2602 bp->rx_dir,
2603 DMA_ATTR_WEAK_ORDERING);
c61fb99c 2604 kfree(data);
3ed3a83e 2605 }
c0c050c5
MC
2606 }
2607
2608 for (j = 0; j < max_agg_idx; j++) {
2609 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2610 &rxr->rx_agg_ring[j];
2611 struct page *page = rx_agg_buf->page;
2612
2613 if (!page)
2614 continue;
2615
c519fe9a
SN
2616 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2617 BNXT_RX_PAGE_SIZE,
2618 PCI_DMA_FROMDEVICE,
2619 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
2620
2621 rx_agg_buf->page = NULL;
2622 __clear_bit(j, rxr->rx_agg_bmap);
2623
2624 __free_page(page);
2625 }
89d0a06c
MC
2626 if (rxr->rx_page) {
2627 __free_page(rxr->rx_page);
2628 rxr->rx_page = NULL;
2629 }
ec4d8e7c
MC
2630 map = rxr->rx_tpa_idx_map;
2631 if (map)
2632 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
c0c050c5
MC
2633 }
2634}
2635
2636static void bnxt_free_skbs(struct bnxt *bp)
2637{
2638 bnxt_free_tx_skbs(bp);
2639 bnxt_free_rx_skbs(bp);
2640}
2641
6fe19886 2642static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5
MC
2643{
2644 struct pci_dev *pdev = bp->pdev;
2645 int i;
2646
6fe19886
MC
2647 for (i = 0; i < rmem->nr_pages; i++) {
2648 if (!rmem->pg_arr[i])
c0c050c5
MC
2649 continue;
2650
6fe19886
MC
2651 dma_free_coherent(&pdev->dev, rmem->page_size,
2652 rmem->pg_arr[i], rmem->dma_arr[i]);
c0c050c5 2653
6fe19886 2654 rmem->pg_arr[i] = NULL;
c0c050c5 2655 }
6fe19886 2656 if (rmem->pg_tbl) {
4f49b2b8
MC
2657 size_t pg_tbl_size = rmem->nr_pages * 8;
2658
2659 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2660 pg_tbl_size = rmem->page_size;
2661 dma_free_coherent(&pdev->dev, pg_tbl_size,
6fe19886
MC
2662 rmem->pg_tbl, rmem->pg_tbl_map);
2663 rmem->pg_tbl = NULL;
c0c050c5 2664 }
6fe19886
MC
2665 if (rmem->vmem_size && *rmem->vmem) {
2666 vfree(*rmem->vmem);
2667 *rmem->vmem = NULL;
c0c050c5
MC
2668 }
2669}
2670
6fe19886 2671static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5 2672{
c0c050c5 2673 struct pci_dev *pdev = bp->pdev;
66cca20a 2674 u64 valid_bit = 0;
6fe19886 2675 int i;
c0c050c5 2676
66cca20a
MC
2677 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2678 valid_bit = PTU_PTE_VALID;
4f49b2b8
MC
2679 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2680 size_t pg_tbl_size = rmem->nr_pages * 8;
2681
2682 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2683 pg_tbl_size = rmem->page_size;
2684 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
6fe19886 2685 &rmem->pg_tbl_map,
c0c050c5 2686 GFP_KERNEL);
6fe19886 2687 if (!rmem->pg_tbl)
c0c050c5
MC
2688 return -ENOMEM;
2689 }
2690
6fe19886 2691 for (i = 0; i < rmem->nr_pages; i++) {
66cca20a
MC
2692 u64 extra_bits = valid_bit;
2693
6fe19886
MC
2694 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2695 rmem->page_size,
2696 &rmem->dma_arr[i],
c0c050c5 2697 GFP_KERNEL);
6fe19886 2698 if (!rmem->pg_arr[i])
c0c050c5
MC
2699 return -ENOMEM;
2700
3be8136c
MC
2701 if (rmem->init_val)
2702 memset(rmem->pg_arr[i], rmem->init_val,
2703 rmem->page_size);
4f49b2b8 2704 if (rmem->nr_pages > 1 || rmem->depth > 0) {
66cca20a
MC
2705 if (i == rmem->nr_pages - 2 &&
2706 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2707 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2708 else if (i == rmem->nr_pages - 1 &&
2709 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2710 extra_bits |= PTU_PTE_LAST;
2711 rmem->pg_tbl[i] =
2712 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2713 }
c0c050c5
MC
2714 }
2715
6fe19886
MC
2716 if (rmem->vmem_size) {
2717 *rmem->vmem = vzalloc(rmem->vmem_size);
2718 if (!(*rmem->vmem))
c0c050c5
MC
2719 return -ENOMEM;
2720 }
2721 return 0;
2722}
2723
4a228a3a
MC
2724static void bnxt_free_tpa_info(struct bnxt *bp)
2725{
2726 int i;
2727
2728 for (i = 0; i < bp->rx_nr_rings; i++) {
2729 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2730
ec4d8e7c
MC
2731 kfree(rxr->rx_tpa_idx_map);
2732 rxr->rx_tpa_idx_map = NULL;
79632e9b
MC
2733 if (rxr->rx_tpa) {
2734 kfree(rxr->rx_tpa[0].agg_arr);
2735 rxr->rx_tpa[0].agg_arr = NULL;
2736 }
4a228a3a
MC
2737 kfree(rxr->rx_tpa);
2738 rxr->rx_tpa = NULL;
2739 }
2740}
2741
2742static int bnxt_alloc_tpa_info(struct bnxt *bp)
2743{
79632e9b
MC
2744 int i, j, total_aggs = 0;
2745
2746 bp->max_tpa = MAX_TPA;
2747 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2748 if (!bp->max_tpa_v2)
2749 return 0;
2750 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2751 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2752 }
4a228a3a
MC
2753
2754 for (i = 0; i < bp->rx_nr_rings; i++) {
2755 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
79632e9b 2756 struct rx_agg_cmp *agg;
4a228a3a 2757
79632e9b 2758 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
4a228a3a
MC
2759 GFP_KERNEL);
2760 if (!rxr->rx_tpa)
2761 return -ENOMEM;
79632e9b
MC
2762
2763 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2764 continue;
2765 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2766 rxr->rx_tpa[0].agg_arr = agg;
2767 if (!agg)
2768 return -ENOMEM;
2769 for (j = 1; j < bp->max_tpa; j++)
2770 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
ec4d8e7c
MC
2771 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2772 GFP_KERNEL);
2773 if (!rxr->rx_tpa_idx_map)
2774 return -ENOMEM;
4a228a3a
MC
2775 }
2776 return 0;
2777}
2778
c0c050c5
MC
2779static void bnxt_free_rx_rings(struct bnxt *bp)
2780{
2781 int i;
2782
b6ab4b01 2783 if (!bp->rx_ring)
c0c050c5
MC
2784 return;
2785
4a228a3a 2786 bnxt_free_tpa_info(bp);
c0c050c5 2787 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2788 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2789 struct bnxt_ring_struct *ring;
2790
c6d30e83
MC
2791 if (rxr->xdp_prog)
2792 bpf_prog_put(rxr->xdp_prog);
2793
96a8604f
JDB
2794 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2795 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2796
12479f62 2797 page_pool_destroy(rxr->page_pool);
322b87ca
AG
2798 rxr->page_pool = NULL;
2799
c0c050c5
MC
2800 kfree(rxr->rx_agg_bmap);
2801 rxr->rx_agg_bmap = NULL;
2802
2803 ring = &rxr->rx_ring_struct;
6fe19886 2804 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2805
2806 ring = &rxr->rx_agg_ring_struct;
6fe19886 2807 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2808 }
2809}
2810
322b87ca
AG
2811static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2812 struct bnxt_rx_ring_info *rxr)
2813{
2814 struct page_pool_params pp = { 0 };
2815
2816 pp.pool_size = bp->rx_ring_size;
2817 pp.nid = dev_to_node(&bp->pdev->dev);
2818 pp.dev = &bp->pdev->dev;
2819 pp.dma_dir = DMA_BIDIRECTIONAL;
2820
2821 rxr->page_pool = page_pool_create(&pp);
2822 if (IS_ERR(rxr->page_pool)) {
2823 int err = PTR_ERR(rxr->page_pool);
2824
2825 rxr->page_pool = NULL;
2826 return err;
2827 }
2828 return 0;
2829}
2830
c0c050c5
MC
2831static int bnxt_alloc_rx_rings(struct bnxt *bp)
2832{
4a228a3a 2833 int i, rc = 0, agg_rings = 0;
c0c050c5 2834
b6ab4b01
MC
2835 if (!bp->rx_ring)
2836 return -ENOMEM;
2837
c0c050c5
MC
2838 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2839 agg_rings = 1;
2840
c0c050c5 2841 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2842 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2843 struct bnxt_ring_struct *ring;
2844
c0c050c5
MC
2845 ring = &rxr->rx_ring_struct;
2846
322b87ca
AG
2847 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2848 if (rc)
2849 return rc;
2850
96a8604f 2851 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
12479f62 2852 if (rc < 0)
96a8604f
JDB
2853 return rc;
2854
f18c2b77 2855 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
322b87ca
AG
2856 MEM_TYPE_PAGE_POOL,
2857 rxr->page_pool);
f18c2b77
AG
2858 if (rc) {
2859 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2860 return rc;
2861 }
2862
6fe19886 2863 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2864 if (rc)
2865 return rc;
2866
2c61d211 2867 ring->grp_idx = i;
c0c050c5
MC
2868 if (agg_rings) {
2869 u16 mem_size;
2870
2871 ring = &rxr->rx_agg_ring_struct;
6fe19886 2872 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2873 if (rc)
2874 return rc;
2875
9899bb59 2876 ring->grp_idx = i;
c0c050c5
MC
2877 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2878 mem_size = rxr->rx_agg_bmap_size / 8;
2879 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2880 if (!rxr->rx_agg_bmap)
2881 return -ENOMEM;
c0c050c5
MC
2882 }
2883 }
4a228a3a
MC
2884 if (bp->flags & BNXT_FLAG_TPA)
2885 rc = bnxt_alloc_tpa_info(bp);
2886 return rc;
c0c050c5
MC
2887}
2888
2889static void bnxt_free_tx_rings(struct bnxt *bp)
2890{
2891 int i;
2892 struct pci_dev *pdev = bp->pdev;
2893
b6ab4b01 2894 if (!bp->tx_ring)
c0c050c5
MC
2895 return;
2896
2897 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2898 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2899 struct bnxt_ring_struct *ring;
2900
c0c050c5
MC
2901 if (txr->tx_push) {
2902 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2903 txr->tx_push, txr->tx_push_mapping);
2904 txr->tx_push = NULL;
2905 }
2906
2907 ring = &txr->tx_ring_struct;
2908
6fe19886 2909 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2910 }
2911}
2912
2913static int bnxt_alloc_tx_rings(struct bnxt *bp)
2914{
2915 int i, j, rc;
2916 struct pci_dev *pdev = bp->pdev;
2917
2918 bp->tx_push_size = 0;
2919 if (bp->tx_push_thresh) {
2920 int push_size;
2921
2922 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2923 bp->tx_push_thresh);
2924
4419dbe6 2925 if (push_size > 256) {
c0c050c5
MC
2926 push_size = 0;
2927 bp->tx_push_thresh = 0;
2928 }
2929
2930 bp->tx_push_size = push_size;
2931 }
2932
2933 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2934 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5 2935 struct bnxt_ring_struct *ring;
2e8ef77e 2936 u8 qidx;
c0c050c5 2937
c0c050c5
MC
2938 ring = &txr->tx_ring_struct;
2939
6fe19886 2940 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2941 if (rc)
2942 return rc;
2943
9899bb59 2944 ring->grp_idx = txr->bnapi->index;
c0c050c5 2945 if (bp->tx_push_size) {
c0c050c5
MC
2946 dma_addr_t mapping;
2947
2948 /* One pre-allocated DMA buffer to backup
2949 * TX push operation
2950 */
2951 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2952 bp->tx_push_size,
2953 &txr->tx_push_mapping,
2954 GFP_KERNEL);
2955
2956 if (!txr->tx_push)
2957 return -ENOMEM;
2958
c0c050c5
MC
2959 mapping = txr->tx_push_mapping +
2960 sizeof(struct tx_push_bd);
4419dbe6 2961 txr->data_mapping = cpu_to_le64(mapping);
c0c050c5 2962 }
2e8ef77e
MC
2963 qidx = bp->tc_to_qidx[j];
2964 ring->queue_id = bp->q_info[qidx].queue_id;
5f449249
MC
2965 if (i < bp->tx_nr_rings_xdp)
2966 continue;
c0c050c5
MC
2967 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2968 j++;
2969 }
2970 return 0;
2971}
2972
2973static void bnxt_free_cp_rings(struct bnxt *bp)
2974{
2975 int i;
2976
2977 if (!bp->bnapi)
2978 return;
2979
2980 for (i = 0; i < bp->cp_nr_rings; i++) {
2981 struct bnxt_napi *bnapi = bp->bnapi[i];
2982 struct bnxt_cp_ring_info *cpr;
2983 struct bnxt_ring_struct *ring;
50e3ab78 2984 int j;
c0c050c5
MC
2985
2986 if (!bnapi)
2987 continue;
2988
2989 cpr = &bnapi->cp_ring;
2990 ring = &cpr->cp_ring_struct;
2991
6fe19886 2992 bnxt_free_ring(bp, &ring->ring_mem);
50e3ab78
MC
2993
2994 for (j = 0; j < 2; j++) {
2995 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2996
2997 if (cpr2) {
2998 ring = &cpr2->cp_ring_struct;
2999 bnxt_free_ring(bp, &ring->ring_mem);
3000 kfree(cpr2);
3001 cpr->cp_ring_arr[j] = NULL;
3002 }
3003 }
c0c050c5
MC
3004 }
3005}
3006
50e3ab78
MC
3007static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3008{
3009 struct bnxt_ring_mem_info *rmem;
3010 struct bnxt_ring_struct *ring;
3011 struct bnxt_cp_ring_info *cpr;
3012 int rc;
3013
3014 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3015 if (!cpr)
3016 return NULL;
3017
3018 ring = &cpr->cp_ring_struct;
3019 rmem = &ring->ring_mem;
3020 rmem->nr_pages = bp->cp_nr_pages;
3021 rmem->page_size = HW_CMPD_RING_SIZE;
3022 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3023 rmem->dma_arr = cpr->cp_desc_mapping;
3024 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3025 rc = bnxt_alloc_ring(bp, rmem);
3026 if (rc) {
3027 bnxt_free_ring(bp, rmem);
3028 kfree(cpr);
3029 cpr = NULL;
3030 }
3031 return cpr;
3032}
3033
c0c050c5
MC
3034static int bnxt_alloc_cp_rings(struct bnxt *bp)
3035{
50e3ab78 3036 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
e5811b8c 3037 int i, rc, ulp_base_vec, ulp_msix;
c0c050c5 3038
e5811b8c
MC
3039 ulp_msix = bnxt_get_ulp_msix_num(bp);
3040 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
c0c050c5
MC
3041 for (i = 0; i < bp->cp_nr_rings; i++) {
3042 struct bnxt_napi *bnapi = bp->bnapi[i];
3043 struct bnxt_cp_ring_info *cpr;
3044 struct bnxt_ring_struct *ring;
3045
3046 if (!bnapi)
3047 continue;
3048
3049 cpr = &bnapi->cp_ring;
50e3ab78 3050 cpr->bnapi = bnapi;
c0c050c5
MC
3051 ring = &cpr->cp_ring_struct;
3052
6fe19886 3053 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3054 if (rc)
3055 return rc;
e5811b8c
MC
3056
3057 if (ulp_msix && i >= ulp_base_vec)
3058 ring->map_idx = i + ulp_msix;
3059 else
3060 ring->map_idx = i;
50e3ab78
MC
3061
3062 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3063 continue;
3064
3065 if (i < bp->rx_nr_rings) {
3066 struct bnxt_cp_ring_info *cpr2 =
3067 bnxt_alloc_cp_sub_ring(bp);
3068
3069 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3070 if (!cpr2)
3071 return -ENOMEM;
3072 cpr2->bnapi = bnapi;
3073 }
3074 if ((sh && i < bp->tx_nr_rings) ||
3075 (!sh && i >= bp->rx_nr_rings)) {
3076 struct bnxt_cp_ring_info *cpr2 =
3077 bnxt_alloc_cp_sub_ring(bp);
3078
3079 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3080 if (!cpr2)
3081 return -ENOMEM;
3082 cpr2->bnapi = bnapi;
3083 }
c0c050c5
MC
3084 }
3085 return 0;
3086}
3087
3088static void bnxt_init_ring_struct(struct bnxt *bp)
3089{
3090 int i;
3091
3092 for (i = 0; i < bp->cp_nr_rings; i++) {
3093 struct bnxt_napi *bnapi = bp->bnapi[i];
6fe19886 3094 struct bnxt_ring_mem_info *rmem;
c0c050c5
MC
3095 struct bnxt_cp_ring_info *cpr;
3096 struct bnxt_rx_ring_info *rxr;
3097 struct bnxt_tx_ring_info *txr;
3098 struct bnxt_ring_struct *ring;
3099
3100 if (!bnapi)
3101 continue;
3102
3103 cpr = &bnapi->cp_ring;
3104 ring = &cpr->cp_ring_struct;
6fe19886
MC
3105 rmem = &ring->ring_mem;
3106 rmem->nr_pages = bp->cp_nr_pages;
3107 rmem->page_size = HW_CMPD_RING_SIZE;
3108 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3109 rmem->dma_arr = cpr->cp_desc_mapping;
3110 rmem->vmem_size = 0;
c0c050c5 3111
b6ab4b01 3112 rxr = bnapi->rx_ring;
3b2b7d9d
MC
3113 if (!rxr)
3114 goto skip_rx;
3115
c0c050c5 3116 ring = &rxr->rx_ring_struct;
6fe19886
MC
3117 rmem = &ring->ring_mem;
3118 rmem->nr_pages = bp->rx_nr_pages;
3119 rmem->page_size = HW_RXBD_RING_SIZE;
3120 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3121 rmem->dma_arr = rxr->rx_desc_mapping;
3122 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3123 rmem->vmem = (void **)&rxr->rx_buf_ring;
c0c050c5
MC
3124
3125 ring = &rxr->rx_agg_ring_struct;
6fe19886
MC
3126 rmem = &ring->ring_mem;
3127 rmem->nr_pages = bp->rx_agg_nr_pages;
3128 rmem->page_size = HW_RXBD_RING_SIZE;
3129 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3130 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3131 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3132 rmem->vmem = (void **)&rxr->rx_agg_ring;
c0c050c5 3133
3b2b7d9d 3134skip_rx:
b6ab4b01 3135 txr = bnapi->tx_ring;
3b2b7d9d
MC
3136 if (!txr)
3137 continue;
3138
c0c050c5 3139 ring = &txr->tx_ring_struct;
6fe19886
MC
3140 rmem = &ring->ring_mem;
3141 rmem->nr_pages = bp->tx_nr_pages;
3142 rmem->page_size = HW_RXBD_RING_SIZE;
3143 rmem->pg_arr = (void **)txr->tx_desc_ring;
3144 rmem->dma_arr = txr->tx_desc_mapping;
3145 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3146 rmem->vmem = (void **)&txr->tx_buf_ring;
c0c050c5
MC
3147 }
3148}
3149
3150static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3151{
3152 int i;
3153 u32 prod;
3154 struct rx_bd **rx_buf_ring;
3155
6fe19886
MC
3156 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3157 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
c0c050c5
MC
3158 int j;
3159 struct rx_bd *rxbd;
3160
3161 rxbd = rx_buf_ring[i];
3162 if (!rxbd)
3163 continue;
3164
3165 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3166 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3167 rxbd->rx_bd_opaque = prod;
3168 }
3169 }
3170}
3171
3172static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3173{
3174 struct net_device *dev = bp->dev;
c0c050c5
MC
3175 struct bnxt_rx_ring_info *rxr;
3176 struct bnxt_ring_struct *ring;
3177 u32 prod, type;
3178 int i;
3179
c0c050c5
MC
3180 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3181 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3182
3183 if (NET_IP_ALIGN == 2)
3184 type |= RX_BD_FLAGS_SOP;
3185
b6ab4b01 3186 rxr = &bp->rx_ring[ring_nr];
c0c050c5
MC
3187 ring = &rxr->rx_ring_struct;
3188 bnxt_init_rxbd_pages(ring, type);
3189
c6d30e83 3190 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
85192dbf
AN
3191 bpf_prog_add(bp->xdp_prog, 1);
3192 rxr->xdp_prog = bp->xdp_prog;
c6d30e83 3193 }
c0c050c5
MC
3194 prod = rxr->rx_prod;
3195 for (i = 0; i < bp->rx_ring_size; i++) {
3196 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
3197 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3198 ring_nr, i, bp->rx_ring_size);
3199 break;
3200 }
3201 prod = NEXT_RX(prod);
3202 }
3203 rxr->rx_prod = prod;
3204 ring->fw_ring_id = INVALID_HW_RING_ID;
3205
edd0c2cc
MC
3206 ring = &rxr->rx_agg_ring_struct;
3207 ring->fw_ring_id = INVALID_HW_RING_ID;
3208
c0c050c5
MC
3209 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3210 return 0;
3211
2839f28b 3212 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
c0c050c5
MC
3213 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3214
3215 bnxt_init_rxbd_pages(ring, type);
3216
3217 prod = rxr->rx_agg_prod;
3218 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3219 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
3220 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3221 ring_nr, i, bp->rx_ring_size);
3222 break;
3223 }
3224 prod = NEXT_RX_AGG(prod);
3225 }
3226 rxr->rx_agg_prod = prod;
c0c050c5
MC
3227
3228 if (bp->flags & BNXT_FLAG_TPA) {
3229 if (rxr->rx_tpa) {
3230 u8 *data;
3231 dma_addr_t mapping;
3232
79632e9b 3233 for (i = 0; i < bp->max_tpa; i++) {
c0c050c5
MC
3234 data = __bnxt_alloc_rx_data(bp, &mapping,
3235 GFP_KERNEL);
3236 if (!data)
3237 return -ENOMEM;
3238
3239 rxr->rx_tpa[i].data = data;
b3dba77c 3240 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
c0c050c5
MC
3241 rxr->rx_tpa[i].mapping = mapping;
3242 }
3243 } else {
3244 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
3245 return -ENOMEM;
3246 }
3247 }
3248
3249 return 0;
3250}
3251
2247925f
SP
3252static void bnxt_init_cp_rings(struct bnxt *bp)
3253{
3e08b184 3254 int i, j;
2247925f
SP
3255
3256 for (i = 0; i < bp->cp_nr_rings; i++) {
3257 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3258 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3259
3260 ring->fw_ring_id = INVALID_HW_RING_ID;
6a8788f2
AG
3261 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3262 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3e08b184
MC
3263 for (j = 0; j < 2; j++) {
3264 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3265
3266 if (!cpr2)
3267 continue;
3268
3269 ring = &cpr2->cp_ring_struct;
3270 ring->fw_ring_id = INVALID_HW_RING_ID;
3271 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3272 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3273 }
2247925f
SP
3274 }
3275}
3276
c0c050c5
MC
3277static int bnxt_init_rx_rings(struct bnxt *bp)
3278{
3279 int i, rc = 0;
3280
c61fb99c 3281 if (BNXT_RX_PAGE_MODE(bp)) {
c6d30e83
MC
3282 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3283 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
c61fb99c
MC
3284 } else {
3285 bp->rx_offset = BNXT_RX_OFFSET;
3286 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3287 }
b3dba77c 3288
c0c050c5
MC
3289 for (i = 0; i < bp->rx_nr_rings; i++) {
3290 rc = bnxt_init_one_rx_ring(bp, i);
3291 if (rc)
3292 break;
3293 }
3294
3295 return rc;
3296}
3297
3298static int bnxt_init_tx_rings(struct bnxt *bp)
3299{
3300 u16 i;
3301
3302 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3303 MAX_SKB_FRAGS + 1);
3304
3305 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3306 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3307 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3308
3309 ring->fw_ring_id = INVALID_HW_RING_ID;
3310 }
3311
3312 return 0;
3313}
3314
3315static void bnxt_free_ring_grps(struct bnxt *bp)
3316{
3317 kfree(bp->grp_info);
3318 bp->grp_info = NULL;
3319}
3320
3321static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3322{
3323 int i;
3324
3325 if (irq_re_init) {
3326 bp->grp_info = kcalloc(bp->cp_nr_rings,
3327 sizeof(struct bnxt_ring_grp_info),
3328 GFP_KERNEL);
3329 if (!bp->grp_info)
3330 return -ENOMEM;
3331 }
3332 for (i = 0; i < bp->cp_nr_rings; i++) {
3333 if (irq_re_init)
3334 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3335 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3336 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3337 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3338 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3339 }
3340 return 0;
3341}
3342
3343static void bnxt_free_vnics(struct bnxt *bp)
3344{
3345 kfree(bp->vnic_info);
3346 bp->vnic_info = NULL;
3347 bp->nr_vnics = 0;
3348}
3349
3350static int bnxt_alloc_vnics(struct bnxt *bp)
3351{
3352 int num_vnics = 1;
3353
3354#ifdef CONFIG_RFS_ACCEL
9b3d15e6 3355 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
c0c050c5
MC
3356 num_vnics += bp->rx_nr_rings;
3357#endif
3358
dc52c6c7
PS
3359 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3360 num_vnics++;
3361
c0c050c5
MC
3362 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3363 GFP_KERNEL);
3364 if (!bp->vnic_info)
3365 return -ENOMEM;
3366
3367 bp->nr_vnics = num_vnics;
3368 return 0;
3369}
3370
3371static void bnxt_init_vnics(struct bnxt *bp)
3372{
3373 int i;
3374
3375 for (i = 0; i < bp->nr_vnics; i++) {
3376 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
44c6f72a 3377 int j;
c0c050c5
MC
3378
3379 vnic->fw_vnic_id = INVALID_HW_RING_ID;
44c6f72a
MC
3380 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3381 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3382
c0c050c5
MC
3383 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3384
3385 if (bp->vnic_info[i].rss_hash_key) {
3386 if (i == 0)
3387 prandom_bytes(vnic->rss_hash_key,
3388 HW_HASH_KEY_SIZE);
3389 else
3390 memcpy(vnic->rss_hash_key,
3391 bp->vnic_info[0].rss_hash_key,
3392 HW_HASH_KEY_SIZE);
3393 }
3394 }
3395}
3396
3397static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3398{
3399 int pages;
3400
3401 pages = ring_size / desc_per_pg;
3402
3403 if (!pages)
3404 return 1;
3405
3406 pages++;
3407
3408 while (pages & (pages - 1))
3409 pages++;
3410
3411 return pages;
3412}
3413
c6d30e83 3414void bnxt_set_tpa_flags(struct bnxt *bp)
c0c050c5
MC
3415{
3416 bp->flags &= ~BNXT_FLAG_TPA;
341138c3
MC
3417 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3418 return;
c0c050c5
MC
3419 if (bp->dev->features & NETIF_F_LRO)
3420 bp->flags |= BNXT_FLAG_LRO;
1054aee8 3421 else if (bp->dev->features & NETIF_F_GRO_HW)
c0c050c5
MC
3422 bp->flags |= BNXT_FLAG_GRO;
3423}
3424
3425/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3426 * be set on entry.
3427 */
3428void bnxt_set_ring_params(struct bnxt *bp)
3429{
3430 u32 ring_size, rx_size, rx_space;
3431 u32 agg_factor = 0, agg_ring_size = 0;
3432
3433 /* 8 for CRC and VLAN */
3434 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3435
3436 rx_space = rx_size + NET_SKB_PAD +
3437 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3438
3439 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3440 ring_size = bp->rx_ring_size;
3441 bp->rx_agg_ring_size = 0;
3442 bp->rx_agg_nr_pages = 0;
3443
3444 if (bp->flags & BNXT_FLAG_TPA)
2839f28b 3445 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
c0c050c5
MC
3446
3447 bp->flags &= ~BNXT_FLAG_JUMBO;
bdbd1eb5 3448 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
c0c050c5
MC
3449 u32 jumbo_factor;
3450
3451 bp->flags |= BNXT_FLAG_JUMBO;
3452 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3453 if (jumbo_factor > agg_factor)
3454 agg_factor = jumbo_factor;
3455 }
3456 agg_ring_size = ring_size * agg_factor;
3457
3458 if (agg_ring_size) {
3459 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3460 RX_DESC_CNT);
3461 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3462 u32 tmp = agg_ring_size;
3463
3464 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3465 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3466 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3467 tmp, agg_ring_size);
3468 }
3469 bp->rx_agg_ring_size = agg_ring_size;
3470 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3471 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3472 rx_space = rx_size + NET_SKB_PAD +
3473 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3474 }
3475
3476 bp->rx_buf_use_size = rx_size;
3477 bp->rx_buf_size = rx_space;
3478
3479 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3480 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3481
3482 ring_size = bp->tx_ring_size;
3483 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3484 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3485
3486 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3487 bp->cp_ring_size = ring_size;
3488
3489 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3490 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3491 bp->cp_nr_pages = MAX_CP_PAGES;
3492 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3493 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3494 ring_size, bp->cp_ring_size);
3495 }
3496 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3497 bp->cp_ring_mask = bp->cp_bit - 1;
3498}
3499
96a8604f
JDB
3500/* Changing allocation mode of RX rings.
3501 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3502 */
c61fb99c 3503int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
6bb19474 3504{
c61fb99c
MC
3505 if (page_mode) {
3506 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3507 return -EOPNOTSUPP;
7eb9bb3a
MC
3508 bp->dev->max_mtu =
3509 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
c61fb99c
MC
3510 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3511 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
c61fb99c
MC
3512 bp->rx_dir = DMA_BIDIRECTIONAL;
3513 bp->rx_skb_func = bnxt_rx_page_skb;
1054aee8
MC
3514 /* Disable LRO or GRO_HW */
3515 netdev_update_features(bp->dev);
c61fb99c 3516 } else {
7eb9bb3a 3517 bp->dev->max_mtu = bp->max_mtu;
c61fb99c
MC
3518 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3519 bp->rx_dir = DMA_FROM_DEVICE;
3520 bp->rx_skb_func = bnxt_rx_skb;
3521 }
6bb19474
MC
3522 return 0;
3523}
3524
c0c050c5
MC
3525static void bnxt_free_vnic_attributes(struct bnxt *bp)
3526{
3527 int i;
3528 struct bnxt_vnic_info *vnic;
3529 struct pci_dev *pdev = bp->pdev;
3530
3531 if (!bp->vnic_info)
3532 return;
3533
3534 for (i = 0; i < bp->nr_vnics; i++) {
3535 vnic = &bp->vnic_info[i];
3536
3537 kfree(vnic->fw_grp_ids);
3538 vnic->fw_grp_ids = NULL;
3539
3540 kfree(vnic->uc_list);
3541 vnic->uc_list = NULL;
3542
3543 if (vnic->mc_list) {
3544 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3545 vnic->mc_list, vnic->mc_list_mapping);
3546 vnic->mc_list = NULL;
3547 }
3548
3549 if (vnic->rss_table) {
3550 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3551 vnic->rss_table,
3552 vnic->rss_table_dma_addr);
3553 vnic->rss_table = NULL;
3554 }
3555
3556 vnic->rss_hash_key = NULL;
3557 vnic->flags = 0;
3558 }
3559}
3560
3561static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3562{
3563 int i, rc = 0, size;
3564 struct bnxt_vnic_info *vnic;
3565 struct pci_dev *pdev = bp->pdev;
3566 int max_rings;
3567
3568 for (i = 0; i < bp->nr_vnics; i++) {
3569 vnic = &bp->vnic_info[i];
3570
3571 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3572 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3573
3574 if (mem_size > 0) {
3575 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3576 if (!vnic->uc_list) {
3577 rc = -ENOMEM;
3578 goto out;
3579 }
3580 }
3581 }
3582
3583 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3584 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3585 vnic->mc_list =
3586 dma_alloc_coherent(&pdev->dev,
3587 vnic->mc_list_size,
3588 &vnic->mc_list_mapping,
3589 GFP_KERNEL);
3590 if (!vnic->mc_list) {
3591 rc = -ENOMEM;
3592 goto out;
3593 }
3594 }
3595
44c6f72a
MC
3596 if (bp->flags & BNXT_FLAG_CHIP_P5)
3597 goto vnic_skip_grps;
3598
c0c050c5
MC
3599 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3600 max_rings = bp->rx_nr_rings;
3601 else
3602 max_rings = 1;
3603
3604 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3605 if (!vnic->fw_grp_ids) {
3606 rc = -ENOMEM;
3607 goto out;
3608 }
44c6f72a 3609vnic_skip_grps:
ae10ae74
MC
3610 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3611 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3612 continue;
3613
c0c050c5
MC
3614 /* Allocate rss table and hash key */
3615 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3616 &vnic->rss_table_dma_addr,
3617 GFP_KERNEL);
3618 if (!vnic->rss_table) {
3619 rc = -ENOMEM;
3620 goto out;
3621 }
3622
3623 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3624
3625 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3626 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3627 }
3628 return 0;
3629
3630out:
3631 return rc;
3632}
3633
3634static void bnxt_free_hwrm_resources(struct bnxt *bp)
3635{
3636 struct pci_dev *pdev = bp->pdev;
3637
a2bf74f4
VD
3638 if (bp->hwrm_cmd_resp_addr) {
3639 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3640 bp->hwrm_cmd_resp_dma_addr);
3641 bp->hwrm_cmd_resp_addr = NULL;
3642 }
760b6d33
VD
3643
3644 if (bp->hwrm_cmd_kong_resp_addr) {
3645 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3646 bp->hwrm_cmd_kong_resp_addr,
3647 bp->hwrm_cmd_kong_resp_dma_addr);
3648 bp->hwrm_cmd_kong_resp_addr = NULL;
3649 }
3650}
3651
3652static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3653{
3654 struct pci_dev *pdev = bp->pdev;
3655
ba642ab7
MC
3656 if (bp->hwrm_cmd_kong_resp_addr)
3657 return 0;
3658
760b6d33
VD
3659 bp->hwrm_cmd_kong_resp_addr =
3660 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3661 &bp->hwrm_cmd_kong_resp_dma_addr,
3662 GFP_KERNEL);
3663 if (!bp->hwrm_cmd_kong_resp_addr)
3664 return -ENOMEM;
3665
3666 return 0;
c0c050c5
MC
3667}
3668
3669static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3670{
3671 struct pci_dev *pdev = bp->pdev;
3672
3673 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3674 &bp->hwrm_cmd_resp_dma_addr,
3675 GFP_KERNEL);
3676 if (!bp->hwrm_cmd_resp_addr)
3677 return -ENOMEM;
c0c050c5
MC
3678
3679 return 0;
3680}
3681
e605db80
DK
3682static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3683{
3684 if (bp->hwrm_short_cmd_req_addr) {
3685 struct pci_dev *pdev = bp->pdev;
3686
1dfddc41 3687 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
e605db80
DK
3688 bp->hwrm_short_cmd_req_addr,
3689 bp->hwrm_short_cmd_req_dma_addr);
3690 bp->hwrm_short_cmd_req_addr = NULL;
3691 }
3692}
3693
3694static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3695{
3696 struct pci_dev *pdev = bp->pdev;
3697
ba642ab7
MC
3698 if (bp->hwrm_short_cmd_req_addr)
3699 return 0;
3700
e605db80 3701 bp->hwrm_short_cmd_req_addr =
1dfddc41 3702 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
e605db80
DK
3703 &bp->hwrm_short_cmd_req_dma_addr,
3704 GFP_KERNEL);
3705 if (!bp->hwrm_short_cmd_req_addr)
3706 return -ENOMEM;
3707
3708 return 0;
3709}
3710
fd3ab1c7 3711static void bnxt_free_port_stats(struct bnxt *bp)
c0c050c5 3712{
c0c050c5
MC
3713 struct pci_dev *pdev = bp->pdev;
3714
00db3cba
VV
3715 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3716 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3717
3bdf56c4
MC
3718 if (bp->hw_rx_port_stats) {
3719 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3720 bp->hw_rx_port_stats,
3721 bp->hw_rx_port_stats_map);
3722 bp->hw_rx_port_stats = NULL;
00db3cba
VV
3723 }
3724
36e53349
MC
3725 if (bp->hw_tx_port_stats_ext) {
3726 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3727 bp->hw_tx_port_stats_ext,
3728 bp->hw_tx_port_stats_ext_map);
3729 bp->hw_tx_port_stats_ext = NULL;
3730 }
3731
00db3cba
VV
3732 if (bp->hw_rx_port_stats_ext) {
3733 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3734 bp->hw_rx_port_stats_ext,
3735 bp->hw_rx_port_stats_ext_map);
3736 bp->hw_rx_port_stats_ext = NULL;
3bdf56c4 3737 }
55e4398d
VV
3738
3739 if (bp->hw_pcie_stats) {
3740 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3741 bp->hw_pcie_stats, bp->hw_pcie_stats_map);
3742 bp->hw_pcie_stats = NULL;
3743 }
fd3ab1c7
MC
3744}
3745
3746static void bnxt_free_ring_stats(struct bnxt *bp)
3747{
3748 struct pci_dev *pdev = bp->pdev;
3749 int size, i;
3bdf56c4 3750
c0c050c5
MC
3751 if (!bp->bnapi)
3752 return;
3753
4e748506 3754 size = bp->hw_ring_stats_size;
c0c050c5
MC
3755
3756 for (i = 0; i < bp->cp_nr_rings; i++) {
3757 struct bnxt_napi *bnapi = bp->bnapi[i];
3758 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3759
3760 if (cpr->hw_stats) {
3761 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3762 cpr->hw_stats_map);
3763 cpr->hw_stats = NULL;
3764 }
3765 }
3766}
3767
3768static int bnxt_alloc_stats(struct bnxt *bp)
3769{
3770 u32 size, i;
3771 struct pci_dev *pdev = bp->pdev;
3772
4e748506 3773 size = bp->hw_ring_stats_size;
c0c050c5
MC
3774
3775 for (i = 0; i < bp->cp_nr_rings; i++) {
3776 struct bnxt_napi *bnapi = bp->bnapi[i];
3777 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3778
3779 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3780 &cpr->hw_stats_map,
3781 GFP_KERNEL);
3782 if (!cpr->hw_stats)
3783 return -ENOMEM;
3784
3785 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3786 }
3bdf56c4 3787
a220eabc
VV
3788 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
3789 return 0;
fd3ab1c7 3790
a220eabc
VV
3791 if (bp->hw_rx_port_stats)
3792 goto alloc_ext_stats;
3bdf56c4 3793
a220eabc
VV
3794 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3795 sizeof(struct tx_port_stats) + 1024;
3bdf56c4 3796
a220eabc
VV
3797 bp->hw_rx_port_stats =
3798 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3799 &bp->hw_rx_port_stats_map,
3800 GFP_KERNEL);
3801 if (!bp->hw_rx_port_stats)
3802 return -ENOMEM;
3bdf56c4 3803
a220eabc
VV
3804 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
3805 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3806 sizeof(struct rx_port_stats) + 512;
3807 bp->flags |= BNXT_FLAG_PORT_STATS;
00db3cba 3808
fd3ab1c7 3809alloc_ext_stats:
a220eabc
VV
3810 /* Display extended statistics only if FW supports it */
3811 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
6154532f 3812 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
00db3cba
VV
3813 return 0;
3814
a220eabc
VV
3815 if (bp->hw_rx_port_stats_ext)
3816 goto alloc_tx_ext_stats;
fd3ab1c7 3817
a220eabc
VV
3818 bp->hw_rx_port_stats_ext =
3819 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3820 &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
3821 if (!bp->hw_rx_port_stats_ext)
3822 return 0;
00db3cba 3823
fd3ab1c7 3824alloc_tx_ext_stats:
a220eabc 3825 if (bp->hw_tx_port_stats_ext)
55e4398d 3826 goto alloc_pcie_stats;
fd3ab1c7 3827
6154532f
VV
3828 if (bp->hwrm_spec_code >= 0x10902 ||
3829 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
a220eabc
VV
3830 bp->hw_tx_port_stats_ext =
3831 dma_alloc_coherent(&pdev->dev,
3832 sizeof(struct tx_port_stats_ext),
3833 &bp->hw_tx_port_stats_ext_map,
3834 GFP_KERNEL);
3bdf56c4 3835 }
a220eabc 3836 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
55e4398d
VV
3837
3838alloc_pcie_stats:
3839 if (bp->hw_pcie_stats ||
3840 !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
3841 return 0;
3842
3843 bp->hw_pcie_stats =
3844 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3845 &bp->hw_pcie_stats_map, GFP_KERNEL);
3846 if (!bp->hw_pcie_stats)
3847 return 0;
3848
3849 bp->flags |= BNXT_FLAG_PCIE_STATS;
c0c050c5
MC
3850 return 0;
3851}
3852
3853static void bnxt_clear_ring_indices(struct bnxt *bp)
3854{
3855 int i;
3856
3857 if (!bp->bnapi)
3858 return;
3859
3860 for (i = 0; i < bp->cp_nr_rings; i++) {
3861 struct bnxt_napi *bnapi = bp->bnapi[i];
3862 struct bnxt_cp_ring_info *cpr;
3863 struct bnxt_rx_ring_info *rxr;
3864 struct bnxt_tx_ring_info *txr;
3865
3866 if (!bnapi)
3867 continue;
3868
3869 cpr = &bnapi->cp_ring;
3870 cpr->cp_raw_cons = 0;
3871
b6ab4b01 3872 txr = bnapi->tx_ring;
3b2b7d9d
MC
3873 if (txr) {
3874 txr->tx_prod = 0;
3875 txr->tx_cons = 0;
3876 }
c0c050c5 3877
b6ab4b01 3878 rxr = bnapi->rx_ring;
3b2b7d9d
MC
3879 if (rxr) {
3880 rxr->rx_prod = 0;
3881 rxr->rx_agg_prod = 0;
3882 rxr->rx_sw_agg_prod = 0;
376a5b86 3883 rxr->rx_next_cons = 0;
3b2b7d9d 3884 }
c0c050c5
MC
3885 }
3886}
3887
3888static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3889{
3890#ifdef CONFIG_RFS_ACCEL
3891 int i;
3892
3893 /* Under rtnl_lock and all our NAPIs have been disabled. It's
3894 * safe to delete the hash table.
3895 */
3896 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3897 struct hlist_head *head;
3898 struct hlist_node *tmp;
3899 struct bnxt_ntuple_filter *fltr;
3900
3901 head = &bp->ntp_fltr_hash_tbl[i];
3902 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3903 hlist_del(&fltr->hash);
3904 kfree(fltr);
3905 }
3906 }
3907 if (irq_reinit) {
3908 kfree(bp->ntp_fltr_bmap);
3909 bp->ntp_fltr_bmap = NULL;
3910 }
3911 bp->ntp_fltr_count = 0;
3912#endif
3913}
3914
3915static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3916{
3917#ifdef CONFIG_RFS_ACCEL
3918 int i, rc = 0;
3919
3920 if (!(bp->flags & BNXT_FLAG_RFS))
3921 return 0;
3922
3923 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3924 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3925
3926 bp->ntp_fltr_count = 0;
ac45bd93
DC
3927 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3928 sizeof(long),
c0c050c5
MC
3929 GFP_KERNEL);
3930
3931 if (!bp->ntp_fltr_bmap)
3932 rc = -ENOMEM;
3933
3934 return rc;
3935#else
3936 return 0;
3937#endif
3938}
3939
3940static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3941{
3942 bnxt_free_vnic_attributes(bp);
3943 bnxt_free_tx_rings(bp);
3944 bnxt_free_rx_rings(bp);
3945 bnxt_free_cp_rings(bp);
3946 bnxt_free_ntp_fltrs(bp, irq_re_init);
3947 if (irq_re_init) {
fd3ab1c7 3948 bnxt_free_ring_stats(bp);
c0c050c5
MC
3949 bnxt_free_ring_grps(bp);
3950 bnxt_free_vnics(bp);
a960dec9
MC
3951 kfree(bp->tx_ring_map);
3952 bp->tx_ring_map = NULL;
b6ab4b01
MC
3953 kfree(bp->tx_ring);
3954 bp->tx_ring = NULL;
3955 kfree(bp->rx_ring);
3956 bp->rx_ring = NULL;
c0c050c5
MC
3957 kfree(bp->bnapi);
3958 bp->bnapi = NULL;
3959 } else {
3960 bnxt_clear_ring_indices(bp);
3961 }
3962}
3963
3964static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3965{
01657bcd 3966 int i, j, rc, size, arr_size;
c0c050c5
MC
3967 void *bnapi;
3968
3969 if (irq_re_init) {
3970 /* Allocate bnapi mem pointer array and mem block for
3971 * all queues
3972 */
3973 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3974 bp->cp_nr_rings);
3975 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3976 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3977 if (!bnapi)
3978 return -ENOMEM;
3979
3980 bp->bnapi = bnapi;
3981 bnapi += arr_size;
3982 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3983 bp->bnapi[i] = bnapi;
3984 bp->bnapi[i]->index = i;
3985 bp->bnapi[i]->bp = bp;
e38287b7
MC
3986 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3987 struct bnxt_cp_ring_info *cpr =
3988 &bp->bnapi[i]->cp_ring;
3989
3990 cpr->cp_ring_struct.ring_mem.flags =
3991 BNXT_RMEM_RING_PTE_FLAG;
3992 }
c0c050c5
MC
3993 }
3994
b6ab4b01
MC
3995 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3996 sizeof(struct bnxt_rx_ring_info),
3997 GFP_KERNEL);
3998 if (!bp->rx_ring)
3999 return -ENOMEM;
4000
4001 for (i = 0; i < bp->rx_nr_rings; i++) {
e38287b7
MC
4002 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4003
4004 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4005 rxr->rx_ring_struct.ring_mem.flags =
4006 BNXT_RMEM_RING_PTE_FLAG;
4007 rxr->rx_agg_ring_struct.ring_mem.flags =
4008 BNXT_RMEM_RING_PTE_FLAG;
4009 }
4010 rxr->bnapi = bp->bnapi[i];
b6ab4b01
MC
4011 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4012 }
4013
4014 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4015 sizeof(struct bnxt_tx_ring_info),
4016 GFP_KERNEL);
4017 if (!bp->tx_ring)
4018 return -ENOMEM;
4019
a960dec9
MC
4020 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4021 GFP_KERNEL);
4022
4023 if (!bp->tx_ring_map)
4024 return -ENOMEM;
4025
01657bcd
MC
4026 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4027 j = 0;
4028 else
4029 j = bp->rx_nr_rings;
4030
4031 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
e38287b7
MC
4032 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4033
4034 if (bp->flags & BNXT_FLAG_CHIP_P5)
4035 txr->tx_ring_struct.ring_mem.flags =
4036 BNXT_RMEM_RING_PTE_FLAG;
4037 txr->bnapi = bp->bnapi[j];
4038 bp->bnapi[j]->tx_ring = txr;
5f449249 4039 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
38413406 4040 if (i >= bp->tx_nr_rings_xdp) {
e38287b7 4041 txr->txq_index = i - bp->tx_nr_rings_xdp;
38413406
MC
4042 bp->bnapi[j]->tx_int = bnxt_tx_int;
4043 } else {
fa3e93e8 4044 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
38413406
MC
4045 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4046 }
b6ab4b01
MC
4047 }
4048
c0c050c5
MC
4049 rc = bnxt_alloc_stats(bp);
4050 if (rc)
4051 goto alloc_mem_err;
4052
4053 rc = bnxt_alloc_ntp_fltrs(bp);
4054 if (rc)
4055 goto alloc_mem_err;
4056
4057 rc = bnxt_alloc_vnics(bp);
4058 if (rc)
4059 goto alloc_mem_err;
4060 }
4061
4062 bnxt_init_ring_struct(bp);
4063
4064 rc = bnxt_alloc_rx_rings(bp);
4065 if (rc)
4066 goto alloc_mem_err;
4067
4068 rc = bnxt_alloc_tx_rings(bp);
4069 if (rc)
4070 goto alloc_mem_err;
4071
4072 rc = bnxt_alloc_cp_rings(bp);
4073 if (rc)
4074 goto alloc_mem_err;
4075
4076 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4077 BNXT_VNIC_UCAST_FLAG;
4078 rc = bnxt_alloc_vnic_attributes(bp);
4079 if (rc)
4080 goto alloc_mem_err;
4081 return 0;
4082
4083alloc_mem_err:
4084 bnxt_free_mem(bp, true);
4085 return rc;
4086}
4087
9d8bc097
MC
4088static void bnxt_disable_int(struct bnxt *bp)
4089{
4090 int i;
4091
4092 if (!bp->bnapi)
4093 return;
4094
4095 for (i = 0; i < bp->cp_nr_rings; i++) {
4096 struct bnxt_napi *bnapi = bp->bnapi[i];
4097 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
daf1f1e7 4098 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9d8bc097 4099
daf1f1e7 4100 if (ring->fw_ring_id != INVALID_HW_RING_ID)
697197e5 4101 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4102 }
4103}
4104
e5811b8c
MC
4105static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4106{
4107 struct bnxt_napi *bnapi = bp->bnapi[n];
4108 struct bnxt_cp_ring_info *cpr;
4109
4110 cpr = &bnapi->cp_ring;
4111 return cpr->cp_ring_struct.map_idx;
4112}
4113
9d8bc097
MC
4114static void bnxt_disable_int_sync(struct bnxt *bp)
4115{
4116 int i;
4117
4118 atomic_inc(&bp->intr_sem);
4119
4120 bnxt_disable_int(bp);
e5811b8c
MC
4121 for (i = 0; i < bp->cp_nr_rings; i++) {
4122 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4123
4124 synchronize_irq(bp->irq_tbl[map_idx].vector);
4125 }
9d8bc097
MC
4126}
4127
4128static void bnxt_enable_int(struct bnxt *bp)
4129{
4130 int i;
4131
4132 atomic_set(&bp->intr_sem, 0);
4133 for (i = 0; i < bp->cp_nr_rings; i++) {
4134 struct bnxt_napi *bnapi = bp->bnapi[i];
4135 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4136
697197e5 4137 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4138 }
4139}
4140
c0c050c5
MC
4141void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4142 u16 cmpl_ring, u16 target_id)
4143{
a8643e16 4144 struct input *req = request;
c0c050c5 4145
a8643e16
MC
4146 req->req_type = cpu_to_le16(req_type);
4147 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4148 req->target_id = cpu_to_le16(target_id);
760b6d33
VD
4149 if (bnxt_kong_hwrm_message(bp, req))
4150 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4151 else
4152 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
c0c050c5
MC
4153}
4154
d4f1420d
MC
4155static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4156{
4157 switch (hwrm_err) {
4158 case HWRM_ERR_CODE_SUCCESS:
4159 return 0;
4160 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4161 return -EACCES;
4162 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4163 return -ENOSPC;
4164 case HWRM_ERR_CODE_INVALID_PARAMS:
4165 case HWRM_ERR_CODE_INVALID_FLAGS:
4166 case HWRM_ERR_CODE_INVALID_ENABLES:
4167 case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4168 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4169 return -EINVAL;
4170 case HWRM_ERR_CODE_NO_BUFFER:
4171 return -ENOMEM;
4172 case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4173 return -EAGAIN;
4174 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4175 return -EOPNOTSUPP;
4176 default:
4177 return -EIO;
4178 }
4179}
4180
fbfbc485
MC
4181static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4182 int timeout, bool silent)
c0c050c5 4183{
a11fa2be 4184 int i, intr_process, rc, tmo_count;
a8643e16 4185 struct input *req = msg;
c0c050c5 4186 u32 *data = msg;
845adfe4
MC
4187 __le32 *resp_len;
4188 u8 *valid;
c0c050c5
MC
4189 u16 cp_ring_id, len = 0;
4190 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
e605db80 4191 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
ebd5818c 4192 struct hwrm_short_input short_input = {0};
2e9ee398 4193 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
89455017 4194 u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
2e9ee398 4195 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
760b6d33 4196 u16 dst = BNXT_HWRM_CHNL_CHIMP;
c0c050c5 4197
b4fff207
MC
4198 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4199 return -EBUSY;
4200
1dfddc41
MC
4201 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4202 if (msg_len > bp->hwrm_max_ext_req_len ||
4203 !bp->hwrm_short_cmd_req_addr)
4204 return -EINVAL;
4205 }
4206
760b6d33
VD
4207 if (bnxt_hwrm_kong_chnl(bp, req)) {
4208 dst = BNXT_HWRM_CHNL_KONG;
4209 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4210 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4211 resp = bp->hwrm_cmd_kong_resp_addr;
4212 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
4213 }
4214
4215 memset(resp, 0, PAGE_SIZE);
4216 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4217 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4218
4219 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4220 /* currently supports only one outstanding message */
4221 if (intr_process)
4222 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4223
1dfddc41
MC
4224 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4225 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
e605db80 4226 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
1dfddc41
MC
4227 u16 max_msg_len;
4228
4229 /* Set boundary for maximum extended request length for short
4230 * cmd format. If passed up from device use the max supported
4231 * internal req length.
4232 */
4233 max_msg_len = bp->hwrm_max_ext_req_len;
e605db80
DK
4234
4235 memcpy(short_cmd_req, req, msg_len);
1dfddc41
MC
4236 if (msg_len < max_msg_len)
4237 memset(short_cmd_req + msg_len, 0,
4238 max_msg_len - msg_len);
e605db80
DK
4239
4240 short_input.req_type = req->req_type;
4241 short_input.signature =
4242 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4243 short_input.size = cpu_to_le16(msg_len);
4244 short_input.req_addr =
4245 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4246
4247 data = (u32 *)&short_input;
4248 msg_len = sizeof(short_input);
4249
4250 /* Sync memory write before updating doorbell */
4251 wmb();
4252
4253 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4254 }
4255
c0c050c5 4256 /* Write request msg to hwrm channel */
2e9ee398 4257 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
c0c050c5 4258
e605db80 4259 for (i = msg_len; i < max_req_len; i += 4)
2e9ee398 4260 writel(0, bp->bar0 + bar_offset + i);
d79979a1 4261
c0c050c5 4262 /* Ring channel doorbell */
2e9ee398 4263 writel(1, bp->bar0 + doorbell_offset);
c0c050c5 4264
5bedb529
MC
4265 if (!pci_is_enabled(bp->pdev))
4266 return 0;
4267
ff4fe81d
MC
4268 if (!timeout)
4269 timeout = DFLT_HWRM_CMD_TIMEOUT;
9751e8e7
AG
4270 /* convert timeout to usec */
4271 timeout *= 1000;
ff4fe81d 4272
c0c050c5 4273 i = 0;
9751e8e7
AG
4274 /* Short timeout for the first few iterations:
4275 * number of loops = number of loops for short timeout +
4276 * number of loops for standard timeout.
4277 */
4278 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4279 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4280 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
89455017
VD
4281 resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
4282
c0c050c5 4283 if (intr_process) {
fc718bb2
VD
4284 u16 seq_id = bp->hwrm_intr_seq_id;
4285
c0c050c5 4286 /* Wait until hwrm response cmpl interrupt is processed */
fc718bb2 4287 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
a11fa2be 4288 i++ < tmo_count) {
642aebde
PC
4289 /* Abort the wait for completion if the FW health
4290 * check has failed.
4291 */
4292 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4293 return -EBUSY;
9751e8e7
AG
4294 /* on first few passes, just barely sleep */
4295 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4296 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4297 HWRM_SHORT_MAX_TIMEOUT);
4298 else
4299 usleep_range(HWRM_MIN_TIMEOUT,
4300 HWRM_MAX_TIMEOUT);
c0c050c5
MC
4301 }
4302
fc718bb2 4303 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
5bedb529
MC
4304 if (!silent)
4305 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4306 le16_to_cpu(req->req_type));
a935cb7e 4307 return -EBUSY;
c0c050c5 4308 }
845adfe4
MC
4309 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4310 HWRM_RESP_LEN_SFT;
89455017 4311 valid = resp_addr + len - 1;
c0c050c5 4312 } else {
cc559c1a
MC
4313 int j;
4314
c0c050c5 4315 /* Check if response len is updated */
a11fa2be 4316 for (i = 0; i < tmo_count; i++) {
642aebde
PC
4317 /* Abort the wait for completion if the FW health
4318 * check has failed.
4319 */
4320 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4321 return -EBUSY;
c0c050c5
MC
4322 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4323 HWRM_RESP_LEN_SFT;
4324 if (len)
4325 break;
9751e8e7 4326 /* on first few passes, just barely sleep */
67681d02 4327 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
9751e8e7
AG
4328 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4329 HWRM_SHORT_MAX_TIMEOUT);
4330 else
4331 usleep_range(HWRM_MIN_TIMEOUT,
4332 HWRM_MAX_TIMEOUT);
c0c050c5
MC
4333 }
4334
a11fa2be 4335 if (i >= tmo_count) {
5bedb529
MC
4336 if (!silent)
4337 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4338 HWRM_TOTAL_TIMEOUT(i),
4339 le16_to_cpu(req->req_type),
4340 le16_to_cpu(req->seq_id), len);
a935cb7e 4341 return -EBUSY;
c0c050c5
MC
4342 }
4343
845adfe4 4344 /* Last byte of resp contains valid bit */
89455017 4345 valid = resp_addr + len - 1;
cc559c1a 4346 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
845adfe4
MC
4347 /* make sure we read from updated DMA memory */
4348 dma_rmb();
4349 if (*valid)
c0c050c5 4350 break;
0000b81a 4351 usleep_range(1, 5);
c0c050c5
MC
4352 }
4353
cc559c1a 4354 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
5bedb529
MC
4355 if (!silent)
4356 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4357 HWRM_TOTAL_TIMEOUT(i),
4358 le16_to_cpu(req->req_type),
4359 le16_to_cpu(req->seq_id), len,
4360 *valid);
a935cb7e 4361 return -EBUSY;
c0c050c5
MC
4362 }
4363 }
4364
845adfe4
MC
4365 /* Zero valid bit for compatibility. Valid bit in an older spec
4366 * may become a new field in a newer spec. We must make sure that
4367 * a new field not implemented by old spec will read zero.
4368 */
4369 *valid = 0;
c0c050c5 4370 rc = le16_to_cpu(resp->error_code);
fbfbc485 4371 if (rc && !silent)
c0c050c5
MC
4372 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4373 le16_to_cpu(resp->req_type),
4374 le16_to_cpu(resp->seq_id), rc);
d4f1420d 4375 return bnxt_hwrm_to_stderr(rc);
fbfbc485
MC
4376}
4377
4378int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4379{
4380 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
c0c050c5
MC
4381}
4382
cc72f3b1
MC
4383int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4384 int timeout)
4385{
4386 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4387}
4388
c0c050c5
MC
4389int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4390{
4391 int rc;
4392
4393 mutex_lock(&bp->hwrm_cmd_lock);
4394 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4395 mutex_unlock(&bp->hwrm_cmd_lock);
4396 return rc;
4397}
4398
90e20921
MC
4399int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4400 int timeout)
4401{
4402 int rc;
4403
4404 mutex_lock(&bp->hwrm_cmd_lock);
4405 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4406 mutex_unlock(&bp->hwrm_cmd_lock);
4407 return rc;
4408}
4409
2e882468
VV
4410int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4411 bool async_only)
c0c050c5 4412{
2e882468 4413 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
c0c050c5 4414 struct hwrm_func_drv_rgtr_input req = {0};
25be8623
MC
4415 DECLARE_BITMAP(async_events_bmap, 256);
4416 u32 *events = (u32 *)async_events_bmap;
acfb50e4 4417 u32 flags;
2e882468 4418 int rc, i;
a1653b13
MC
4419
4420 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4421
4422 req.enables =
4423 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
2e882468
VV
4424 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4425 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
a1653b13 4426
11f15ed3 4427 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
8280b38e
VV
4428 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4429 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4430 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
acfb50e4 4431 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
e633a329
VV
4432 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4433 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
acfb50e4 4434 req.flags = cpu_to_le32(flags);
d4f52de0
MC
4435 req.ver_maj_8b = DRV_VER_MAJ;
4436 req.ver_min_8b = DRV_VER_MIN;
4437 req.ver_upd_8b = DRV_VER_UPD;
4438 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4439 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4440 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
c0c050c5
MC
4441
4442 if (BNXT_PF(bp)) {
9b0436c3 4443 u32 data[8];
a1653b13 4444 int i;
c0c050c5 4445
9b0436c3
MC
4446 memset(data, 0, sizeof(data));
4447 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4448 u16 cmd = bnxt_vf_req_snif[i];
4449 unsigned int bit, idx;
4450
4451 idx = cmd / 32;
4452 bit = cmd % 32;
4453 data[idx] |= 1 << bit;
4454 }
c0c050c5 4455
de68f5de
MC
4456 for (i = 0; i < 8; i++)
4457 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4458
c0c050c5
MC
4459 req.enables |=
4460 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4461 }
4462
abd43a13
VD
4463 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4464 req.flags |= cpu_to_le32(
4465 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4466
2e882468
VV
4467 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4468 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4469 u16 event_id = bnxt_async_events_arr[i];
4470
4471 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4472 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4473 continue;
4474 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4475 }
4476 if (bmap && bmap_size) {
4477 for (i = 0; i < bmap_size; i++) {
4478 if (test_bit(i, bmap))
4479 __set_bit(i, async_events_bmap);
4480 }
4481 }
4482 for (i = 0; i < 8; i++)
4483 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4484
4485 if (async_only)
4486 req.enables =
4487 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4488
25e1acd6
MC
4489 mutex_lock(&bp->hwrm_cmd_lock);
4490 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
bdb38602
VV
4491 if (!rc) {
4492 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4493 if (resp->flags &
4494 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4495 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4496 }
25e1acd6
MC
4497 mutex_unlock(&bp->hwrm_cmd_lock);
4498 return rc;
c0c050c5
MC
4499}
4500
be58a0da
JH
4501static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4502{
4503 struct hwrm_func_drv_unrgtr_input req = {0};
4504
bdb38602
VV
4505 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4506 return 0;
4507
be58a0da
JH
4508 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4509 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4510}
4511
c0c050c5
MC
4512static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4513{
4514 u32 rc = 0;
4515 struct hwrm_tunnel_dst_port_free_input req = {0};
4516
4517 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4518 req.tunnel_type = tunnel_type;
4519
4520 switch (tunnel_type) {
4521 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4522 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4523 break;
4524 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4525 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4526 break;
4527 default:
4528 break;
4529 }
4530
4531 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4532 if (rc)
4533 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4534 rc);
4535 return rc;
4536}
4537
4538static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4539 u8 tunnel_type)
4540{
4541 u32 rc = 0;
4542 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4543 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4544
4545 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4546
4547 req.tunnel_type = tunnel_type;
4548 req.tunnel_dst_port_val = port;
4549
4550 mutex_lock(&bp->hwrm_cmd_lock);
4551 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4552 if (rc) {
4553 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4554 rc);
4555 goto err_out;
4556 }
4557
57aac71b
CJ
4558 switch (tunnel_type) {
4559 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
c0c050c5 4560 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
57aac71b
CJ
4561 break;
4562 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
c0c050c5 4563 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
57aac71b
CJ
4564 break;
4565 default:
4566 break;
4567 }
4568
c0c050c5
MC
4569err_out:
4570 mutex_unlock(&bp->hwrm_cmd_lock);
4571 return rc;
4572}
4573
4574static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4575{
4576 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4577 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4578
4579 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
c193554e 4580 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
c0c050c5
MC
4581
4582 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4583 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4584 req.mask = cpu_to_le32(vnic->rx_mask);
4585 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4586}
4587
4588#ifdef CONFIG_RFS_ACCEL
4589static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4590 struct bnxt_ntuple_filter *fltr)
4591{
4592 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4593
4594 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4595 req.ntuple_filter_id = fltr->filter_id;
4596 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4597}
4598
4599#define BNXT_NTP_FLTR_FLAGS \
4600 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4601 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4602 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4603 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4604 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4605 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4606 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4607 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4608 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4609 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4610 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4611 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4612 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
c193554e 4613 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
c0c050c5 4614
61aad724
MC
4615#define BNXT_NTP_TUNNEL_FLTR_FLAG \
4616 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4617
c0c050c5
MC
4618static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4619 struct bnxt_ntuple_filter *fltr)
4620{
c0c050c5 4621 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
5c209fc8 4622 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
c0c050c5 4623 struct flow_keys *keys = &fltr->fkeys;
ac33906c 4624 struct bnxt_vnic_info *vnic;
41136ab3 4625 u32 flags = 0;
5c209fc8 4626 int rc = 0;
c0c050c5
MC
4627
4628 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
a54c4d74 4629 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
c0c050c5 4630
41136ab3
MC
4631 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4632 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4633 req.dst_id = cpu_to_le16(fltr->rxq);
ac33906c
MC
4634 } else {
4635 vnic = &bp->vnic_info[fltr->rxq + 1];
41136ab3 4636 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
ac33906c 4637 }
41136ab3
MC
4638 req.flags = cpu_to_le32(flags);
4639 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
c0c050c5
MC
4640
4641 req.ethertype = htons(ETH_P_IP);
4642 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
c193554e 4643 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
c0c050c5
MC
4644 req.ip_protocol = keys->basic.ip_proto;
4645
dda0e746
MC
4646 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4647 int i;
4648
4649 req.ethertype = htons(ETH_P_IPV6);
4650 req.ip_addr_type =
4651 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4652 *(struct in6_addr *)&req.src_ipaddr[0] =
4653 keys->addrs.v6addrs.src;
4654 *(struct in6_addr *)&req.dst_ipaddr[0] =
4655 keys->addrs.v6addrs.dst;
4656 for (i = 0; i < 4; i++) {
4657 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4658 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4659 }
4660 } else {
4661 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4662 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4663 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4664 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4665 }
61aad724
MC
4666 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4667 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4668 req.tunnel_type =
4669 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4670 }
c0c050c5
MC
4671
4672 req.src_port = keys->ports.src;
4673 req.src_port_mask = cpu_to_be16(0xffff);
4674 req.dst_port = keys->ports.dst;
4675 req.dst_port_mask = cpu_to_be16(0xffff);
4676
c0c050c5
MC
4677 mutex_lock(&bp->hwrm_cmd_lock);
4678 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5c209fc8
VD
4679 if (!rc) {
4680 resp = bnxt_get_hwrm_resp_addr(bp, &req);
c0c050c5 4681 fltr->filter_id = resp->ntuple_filter_id;
5c209fc8 4682 }
c0c050c5
MC
4683 mutex_unlock(&bp->hwrm_cmd_lock);
4684 return rc;
4685}
4686#endif
4687
4688static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4689 u8 *mac_addr)
4690{
4691 u32 rc = 0;
4692 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4693 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4694
4695 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
dc52c6c7
PS
4696 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4697 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4698 req.flags |=
4699 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
c193554e 4700 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
c0c050c5
MC
4701 req.enables =
4702 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
c193554e 4703 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
c0c050c5
MC
4704 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4705 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4706 req.l2_addr_mask[0] = 0xff;
4707 req.l2_addr_mask[1] = 0xff;
4708 req.l2_addr_mask[2] = 0xff;
4709 req.l2_addr_mask[3] = 0xff;
4710 req.l2_addr_mask[4] = 0xff;
4711 req.l2_addr_mask[5] = 0xff;
4712
4713 mutex_lock(&bp->hwrm_cmd_lock);
4714 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4715 if (!rc)
4716 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4717 resp->l2_filter_id;
4718 mutex_unlock(&bp->hwrm_cmd_lock);
4719 return rc;
4720}
4721
4722static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4723{
4724 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4725 int rc = 0;
4726
4727 /* Any associated ntuple filters will also be cleared by firmware. */
4728 mutex_lock(&bp->hwrm_cmd_lock);
4729 for (i = 0; i < num_of_vnics; i++) {
4730 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4731
4732 for (j = 0; j < vnic->uc_filter_count; j++) {
4733 struct hwrm_cfa_l2_filter_free_input req = {0};
4734
4735 bnxt_hwrm_cmd_hdr_init(bp, &req,
4736 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4737
4738 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4739
4740 rc = _hwrm_send_message(bp, &req, sizeof(req),
4741 HWRM_CMD_TIMEOUT);
4742 }
4743 vnic->uc_filter_count = 0;
4744 }
4745 mutex_unlock(&bp->hwrm_cmd_lock);
4746
4747 return rc;
4748}
4749
4750static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4751{
4752 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
79632e9b 4753 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
c0c050c5
MC
4754 struct hwrm_vnic_tpa_cfg_input req = {0};
4755
3c4fe80b
MC
4756 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4757 return 0;
4758
c0c050c5
MC
4759 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4760
4761 if (tpa_flags) {
4762 u16 mss = bp->dev->mtu - 40;
4763 u32 nsegs, n, segs = 0, flags;
4764
4765 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4766 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4767 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4768 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4769 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4770 if (tpa_flags & BNXT_FLAG_GRO)
4771 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4772
4773 req.flags = cpu_to_le32(flags);
4774
4775 req.enables =
4776 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
c193554e
MC
4777 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4778 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
c0c050c5
MC
4779
4780 /* Number of segs are log2 units, and first packet is not
4781 * included as part of this units.
4782 */
2839f28b
MC
4783 if (mss <= BNXT_RX_PAGE_SIZE) {
4784 n = BNXT_RX_PAGE_SIZE / mss;
c0c050c5
MC
4785 nsegs = (MAX_SKB_FRAGS - 1) * n;
4786 } else {
2839f28b
MC
4787 n = mss / BNXT_RX_PAGE_SIZE;
4788 if (mss & (BNXT_RX_PAGE_SIZE - 1))
c0c050c5
MC
4789 n++;
4790 nsegs = (MAX_SKB_FRAGS - n) / n;
4791 }
4792
79632e9b
MC
4793 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4794 segs = MAX_TPA_SEGS_P5;
4795 max_aggs = bp->max_tpa;
4796 } else {
4797 segs = ilog2(nsegs);
4798 }
c0c050c5 4799 req.max_agg_segs = cpu_to_le16(segs);
79632e9b 4800 req.max_aggs = cpu_to_le16(max_aggs);
c193554e
MC
4801
4802 req.min_agg_len = cpu_to_le32(512);
c0c050c5
MC
4803 }
4804 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4805
4806 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4807}
4808
2c61d211
MC
4809static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4810{
4811 struct bnxt_ring_grp_info *grp_info;
4812
4813 grp_info = &bp->grp_info[ring->grp_idx];
4814 return grp_info->cp_fw_ring_id;
4815}
4816
4817static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4818{
4819 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4820 struct bnxt_napi *bnapi = rxr->bnapi;
4821 struct bnxt_cp_ring_info *cpr;
4822
4823 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4824 return cpr->cp_ring_struct.fw_ring_id;
4825 } else {
4826 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4827 }
4828}
4829
4830static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4831{
4832 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4833 struct bnxt_napi *bnapi = txr->bnapi;
4834 struct bnxt_cp_ring_info *cpr;
4835
4836 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4837 return cpr->cp_ring_struct.fw_ring_id;
4838 } else {
4839 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4840 }
4841}
4842
c0c050c5
MC
4843static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4844{
4845 u32 i, j, max_rings;
4846 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4847 struct hwrm_vnic_rss_cfg_input req = {0};
4848
7b3af4f7
MC
4849 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4850 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
c0c050c5
MC
4851 return 0;
4852
4853 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4854 if (set_rss) {
87da7f79 4855 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
50f011b6 4856 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
dc52c6c7
PS
4857 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4858 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4859 max_rings = bp->rx_nr_rings - 1;
4860 else
4861 max_rings = bp->rx_nr_rings;
4862 } else {
c0c050c5 4863 max_rings = 1;
dc52c6c7 4864 }
c0c050c5
MC
4865
4866 /* Fill the RSS indirection table with ring group ids */
4867 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4868 if (j == max_rings)
4869 j = 0;
4870 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4871 }
4872
4873 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4874 req.hash_key_tbl_addr =
4875 cpu_to_le64(vnic->rss_hash_key_dma_addr);
4876 }
94ce9caa 4877 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
c0c050c5
MC
4878 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4879}
4880
7b3af4f7
MC
4881static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4882{
4883 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4884 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4885 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4886 struct hwrm_vnic_rss_cfg_input req = {0};
4887
4888 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4889 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4890 if (!set_rss) {
4891 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4892 return 0;
4893 }
4894 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4895 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4896 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4897 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4898 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4899 for (i = 0, k = 0; i < nr_ctxs; i++) {
4900 __le16 *ring_tbl = vnic->rss_table;
4901 int rc;
4902
4903 req.ring_table_pair_index = i;
4904 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4905 for (j = 0; j < 64; j++) {
4906 u16 ring_id;
4907
4908 ring_id = rxr->rx_ring_struct.fw_ring_id;
4909 *ring_tbl++ = cpu_to_le16(ring_id);
4910 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4911 *ring_tbl++ = cpu_to_le16(ring_id);
4912 rxr++;
4913 k++;
4914 if (k == max_rings) {
4915 k = 0;
4916 rxr = &bp->rx_ring[0];
4917 }
4918 }
4919 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4920 if (rc)
d4f1420d 4921 return rc;
7b3af4f7
MC
4922 }
4923 return 0;
4924}
4925
c0c050c5
MC
4926static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4927{
4928 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4929 struct hwrm_vnic_plcmodes_cfg_input req = {0};
4930
4931 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4932 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4933 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4934 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4935 req.enables =
4936 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4937 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4938 /* thresholds not implemented in firmware yet */
4939 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4940 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4941 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4942 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4943}
4944
94ce9caa
PS
4945static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4946 u16 ctx_idx)
c0c050c5
MC
4947{
4948 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4949
4950 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4951 req.rss_cos_lb_ctx_id =
94ce9caa 4952 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
c0c050c5
MC
4953
4954 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
94ce9caa 4955 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
c0c050c5
MC
4956}
4957
4958static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4959{
94ce9caa 4960 int i, j;
c0c050c5
MC
4961
4962 for (i = 0; i < bp->nr_vnics; i++) {
4963 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4964
94ce9caa
PS
4965 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4966 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4967 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4968 }
c0c050c5
MC
4969 }
4970 bp->rsscos_nr_ctxs = 0;
4971}
4972
94ce9caa 4973static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
c0c050c5
MC
4974{
4975 int rc;
4976 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4977 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4978 bp->hwrm_cmd_resp_addr;
4979
4980 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4981 -1);
4982
4983 mutex_lock(&bp->hwrm_cmd_lock);
4984 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4985 if (!rc)
94ce9caa 4986 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
c0c050c5
MC
4987 le16_to_cpu(resp->rss_cos_lb_ctx_id);
4988 mutex_unlock(&bp->hwrm_cmd_lock);
4989
4990 return rc;
4991}
4992
abe93ad2
MC
4993static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4994{
4995 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4996 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4997 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4998}
4999
a588e458 5000int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
c0c050c5 5001{
b81a90d3 5002 unsigned int ring = 0, grp_idx;
c0c050c5
MC
5003 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5004 struct hwrm_vnic_cfg_input req = {0};
cf6645f8 5005 u16 def_vlan = 0;
c0c050c5
MC
5006
5007 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
dc52c6c7 5008
7b3af4f7
MC
5009 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5010 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5011
5012 req.default_rx_ring_id =
5013 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5014 req.default_cmpl_ring_id =
5015 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5016 req.enables =
5017 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5018 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5019 goto vnic_mru;
5020 }
dc52c6c7 5021 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
c0c050c5 5022 /* Only RSS support for now TBD: COS & LB */
dc52c6c7
PS
5023 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5024 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5025 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5026 VNIC_CFG_REQ_ENABLES_MRU);
ae10ae74
MC
5027 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5028 req.rss_rule =
5029 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5030 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5031 VNIC_CFG_REQ_ENABLES_MRU);
5032 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
dc52c6c7
PS
5033 } else {
5034 req.rss_rule = cpu_to_le16(0xffff);
5035 }
94ce9caa 5036
dc52c6c7
PS
5037 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5038 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
94ce9caa
PS
5039 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5040 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5041 } else {
5042 req.cos_rule = cpu_to_le16(0xffff);
5043 }
5044
c0c050c5 5045 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
b81a90d3 5046 ring = 0;
c0c050c5 5047 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
b81a90d3 5048 ring = vnic_id - 1;
76595193
PS
5049 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5050 ring = bp->rx_nr_rings - 1;
c0c050c5 5051
b81a90d3 5052 grp_idx = bp->rx_ring[ring].bnapi->index;
c0c050c5 5053 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
c0c050c5 5054 req.lb_rule = cpu_to_le16(0xffff);
7b3af4f7 5055vnic_mru:
c0c050c5
MC
5056 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
5057 VLAN_HLEN);
5058
7b3af4f7 5059 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
cf6645f8
MC
5060#ifdef CONFIG_BNXT_SRIOV
5061 if (BNXT_VF(bp))
5062 def_vlan = bp->vf.vlan;
5063#endif
5064 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
c0c050c5 5065 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
a588e458 5066 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
abe93ad2 5067 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
c0c050c5
MC
5068
5069 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5070}
5071
5072static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5073{
5074 u32 rc = 0;
5075
5076 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5077 struct hwrm_vnic_free_input req = {0};
5078
5079 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5080 req.vnic_id =
5081 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5082
5083 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
c0c050c5
MC
5084 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5085 }
5086 return rc;
5087}
5088
5089static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5090{
5091 u16 i;
5092
5093 for (i = 0; i < bp->nr_vnics; i++)
5094 bnxt_hwrm_vnic_free_one(bp, i);
5095}
5096
b81a90d3
MC
5097static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5098 unsigned int start_rx_ring_idx,
5099 unsigned int nr_rings)
c0c050c5 5100{
b81a90d3
MC
5101 int rc = 0;
5102 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
c0c050c5
MC
5103 struct hwrm_vnic_alloc_input req = {0};
5104 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
44c6f72a
MC
5105 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5106
5107 if (bp->flags & BNXT_FLAG_CHIP_P5)
5108 goto vnic_no_ring_grps;
c0c050c5
MC
5109
5110 /* map ring groups to this vnic */
b81a90d3
MC
5111 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5112 grp_idx = bp->rx_ring[i].bnapi->index;
5113 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
c0c050c5 5114 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
b81a90d3 5115 j, nr_rings);
c0c050c5
MC
5116 break;
5117 }
44c6f72a 5118 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
c0c050c5
MC
5119 }
5120
44c6f72a
MC
5121vnic_no_ring_grps:
5122 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5123 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
c0c050c5
MC
5124 if (vnic_id == 0)
5125 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5126
5127 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5128
5129 mutex_lock(&bp->hwrm_cmd_lock);
5130 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5131 if (!rc)
44c6f72a 5132 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
c0c050c5
MC
5133 mutex_unlock(&bp->hwrm_cmd_lock);
5134 return rc;
5135}
5136
8fdefd63
MC
5137static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5138{
5139 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5140 struct hwrm_vnic_qcaps_input req = {0};
5141 int rc;
5142
fbbdbc64 5143 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
ba642ab7 5144 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
8fdefd63
MC
5145 if (bp->hwrm_spec_code < 0x10600)
5146 return 0;
5147
5148 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5149 mutex_lock(&bp->hwrm_cmd_lock);
5150 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5151 if (!rc) {
abe93ad2
MC
5152 u32 flags = le32_to_cpu(resp->flags);
5153
41e8d798
MC
5154 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5155 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
8fdefd63 5156 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
abe93ad2
MC
5157 if (flags &
5158 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5159 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
79632e9b 5160 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
4e748506
MC
5161 if (bp->max_tpa_v2)
5162 bp->hw_ring_stats_size =
5163 sizeof(struct ctx_hw_stats_ext);
8fdefd63
MC
5164 }
5165 mutex_unlock(&bp->hwrm_cmd_lock);
5166 return rc;
5167}
5168
c0c050c5
MC
5169static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5170{
5171 u16 i;
5172 u32 rc = 0;
5173
44c6f72a
MC
5174 if (bp->flags & BNXT_FLAG_CHIP_P5)
5175 return 0;
5176
c0c050c5
MC
5177 mutex_lock(&bp->hwrm_cmd_lock);
5178 for (i = 0; i < bp->rx_nr_rings; i++) {
5179 struct hwrm_ring_grp_alloc_input req = {0};
5180 struct hwrm_ring_grp_alloc_output *resp =
5181 bp->hwrm_cmd_resp_addr;
b81a90d3 5182 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
c0c050c5
MC
5183
5184 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5185
b81a90d3
MC
5186 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5187 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5188 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5189 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
c0c050c5
MC
5190
5191 rc = _hwrm_send_message(bp, &req, sizeof(req),
5192 HWRM_CMD_TIMEOUT);
5193 if (rc)
5194 break;
5195
b81a90d3
MC
5196 bp->grp_info[grp_idx].fw_grp_id =
5197 le32_to_cpu(resp->ring_group_id);
c0c050c5
MC
5198 }
5199 mutex_unlock(&bp->hwrm_cmd_lock);
5200 return rc;
5201}
5202
5203static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5204{
5205 u16 i;
5206 u32 rc = 0;
5207 struct hwrm_ring_grp_free_input req = {0};
5208
44c6f72a 5209 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
5210 return 0;
5211
5212 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5213
5214 mutex_lock(&bp->hwrm_cmd_lock);
5215 for (i = 0; i < bp->cp_nr_rings; i++) {
5216 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5217 continue;
5218 req.ring_group_id =
5219 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5220
5221 rc = _hwrm_send_message(bp, &req, sizeof(req),
5222 HWRM_CMD_TIMEOUT);
c0c050c5
MC
5223 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5224 }
5225 mutex_unlock(&bp->hwrm_cmd_lock);
5226 return rc;
5227}
5228
5229static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5230 struct bnxt_ring_struct *ring,
9899bb59 5231 u32 ring_type, u32 map_index)
c0c050c5
MC
5232{
5233 int rc = 0, err = 0;
5234 struct hwrm_ring_alloc_input req = {0};
5235 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6fe19886 5236 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
9899bb59 5237 struct bnxt_ring_grp_info *grp_info;
c0c050c5
MC
5238 u16 ring_id;
5239
5240 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5241
5242 req.enables = 0;
6fe19886
MC
5243 if (rmem->nr_pages > 1) {
5244 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
c0c050c5
MC
5245 /* Page size is in log2 units */
5246 req.page_size = BNXT_PAGE_SHIFT;
5247 req.page_tbl_depth = 1;
5248 } else {
6fe19886 5249 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
c0c050c5
MC
5250 }
5251 req.fbo = 0;
5252 /* Association of ring index with doorbell index and MSIX number */
5253 req.logical_id = cpu_to_le16(map_index);
5254
5255 switch (ring_type) {
2c61d211
MC
5256 case HWRM_RING_ALLOC_TX: {
5257 struct bnxt_tx_ring_info *txr;
5258
5259 txr = container_of(ring, struct bnxt_tx_ring_info,
5260 tx_ring_struct);
c0c050c5
MC
5261 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5262 /* Association of transmit ring with completion ring */
9899bb59 5263 grp_info = &bp->grp_info[ring->grp_idx];
2c61d211 5264 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
c0c050c5 5265 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
9899bb59 5266 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
c0c050c5
MC
5267 req.queue_id = cpu_to_le16(ring->queue_id);
5268 break;
2c61d211 5269 }
c0c050c5
MC
5270 case HWRM_RING_ALLOC_RX:
5271 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5272 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
23aefdd7
MC
5273 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5274 u16 flags = 0;
5275
5276 /* Association of rx ring with stats context */
5277 grp_info = &bp->grp_info[ring->grp_idx];
5278 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5279 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5280 req.enables |= cpu_to_le32(
5281 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5282 if (NET_IP_ALIGN == 2)
5283 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5284 req.flags = cpu_to_le16(flags);
5285 }
c0c050c5
MC
5286 break;
5287 case HWRM_RING_ALLOC_AGG:
23aefdd7
MC
5288 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5289 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5290 /* Association of agg ring with rx ring */
5291 grp_info = &bp->grp_info[ring->grp_idx];
5292 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5293 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5294 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5295 req.enables |= cpu_to_le32(
5296 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5297 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5298 } else {
5299 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5300 }
c0c050c5
MC
5301 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5302 break;
5303 case HWRM_RING_ALLOC_CMPL:
bac9a7e0 5304 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
c0c050c5 5305 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
23aefdd7
MC
5306 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5307 /* Association of cp ring with nq */
5308 grp_info = &bp->grp_info[map_index];
5309 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5310 req.cq_handle = cpu_to_le64(ring->handle);
5311 req.enables |= cpu_to_le32(
5312 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5313 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5314 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5315 }
5316 break;
5317 case HWRM_RING_ALLOC_NQ:
5318 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5319 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
c0c050c5
MC
5320 if (bp->flags & BNXT_FLAG_USING_MSIX)
5321 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5322 break;
5323 default:
5324 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5325 ring_type);
5326 return -1;
5327 }
5328
5329 mutex_lock(&bp->hwrm_cmd_lock);
5330 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5331 err = le16_to_cpu(resp->error_code);
5332 ring_id = le16_to_cpu(resp->ring_id);
5333 mutex_unlock(&bp->hwrm_cmd_lock);
5334
5335 if (rc || err) {
2727c888
MC
5336 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5337 ring_type, rc, err);
5338 return -EIO;
c0c050c5
MC
5339 }
5340 ring->fw_ring_id = ring_id;
5341 return rc;
5342}
5343
486b5c22
MC
5344static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5345{
5346 int rc;
5347
5348 if (BNXT_PF(bp)) {
5349 struct hwrm_func_cfg_input req = {0};
5350
5351 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5352 req.fid = cpu_to_le16(0xffff);
5353 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5354 req.async_event_cr = cpu_to_le16(idx);
5355 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5356 } else {
5357 struct hwrm_func_vf_cfg_input req = {0};
5358
5359 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5360 req.enables =
5361 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5362 req.async_event_cr = cpu_to_le16(idx);
5363 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5364 }
5365 return rc;
5366}
5367
697197e5
MC
5368static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5369 u32 map_idx, u32 xid)
5370{
5371 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5372 if (BNXT_PF(bp))
5373 db->doorbell = bp->bar1 + 0x10000;
5374 else
5375 db->doorbell = bp->bar1 + 0x4000;
5376 switch (ring_type) {
5377 case HWRM_RING_ALLOC_TX:
5378 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5379 break;
5380 case HWRM_RING_ALLOC_RX:
5381 case HWRM_RING_ALLOC_AGG:
5382 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5383 break;
5384 case HWRM_RING_ALLOC_CMPL:
5385 db->db_key64 = DBR_PATH_L2;
5386 break;
5387 case HWRM_RING_ALLOC_NQ:
5388 db->db_key64 = DBR_PATH_L2;
5389 break;
5390 }
5391 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5392 } else {
5393 db->doorbell = bp->bar1 + map_idx * 0x80;
5394 switch (ring_type) {
5395 case HWRM_RING_ALLOC_TX:
5396 db->db_key32 = DB_KEY_TX;
5397 break;
5398 case HWRM_RING_ALLOC_RX:
5399 case HWRM_RING_ALLOC_AGG:
5400 db->db_key32 = DB_KEY_RX;
5401 break;
5402 case HWRM_RING_ALLOC_CMPL:
5403 db->db_key32 = DB_KEY_CP;
5404 break;
5405 }
5406 }
5407}
5408
c0c050c5
MC
5409static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5410{
e8f267b0 5411 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
c0c050c5 5412 int i, rc = 0;
697197e5 5413 u32 type;
c0c050c5 5414
23aefdd7
MC
5415 if (bp->flags & BNXT_FLAG_CHIP_P5)
5416 type = HWRM_RING_ALLOC_NQ;
5417 else
5418 type = HWRM_RING_ALLOC_CMPL;
edd0c2cc
MC
5419 for (i = 0; i < bp->cp_nr_rings; i++) {
5420 struct bnxt_napi *bnapi = bp->bnapi[i];
5421 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5422 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9899bb59 5423 u32 map_idx = ring->map_idx;
5e66e35a 5424 unsigned int vector;
c0c050c5 5425
5e66e35a
MC
5426 vector = bp->irq_tbl[map_idx].vector;
5427 disable_irq_nosync(vector);
697197e5 5428 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5e66e35a
MC
5429 if (rc) {
5430 enable_irq(vector);
edd0c2cc 5431 goto err_out;
5e66e35a 5432 }
697197e5
MC
5433 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5434 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5e66e35a 5435 enable_irq(vector);
edd0c2cc 5436 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
486b5c22
MC
5437
5438 if (!i) {
5439 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5440 if (rc)
5441 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5442 }
c0c050c5
MC
5443 }
5444
697197e5 5445 type = HWRM_RING_ALLOC_TX;
edd0c2cc 5446 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5447 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3e08b184
MC
5448 struct bnxt_ring_struct *ring;
5449 u32 map_idx;
c0c050c5 5450
3e08b184
MC
5451 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5452 struct bnxt_napi *bnapi = txr->bnapi;
5453 struct bnxt_cp_ring_info *cpr, *cpr2;
5454 u32 type2 = HWRM_RING_ALLOC_CMPL;
5455
5456 cpr = &bnapi->cp_ring;
5457 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5458 ring = &cpr2->cp_ring_struct;
5459 ring->handle = BNXT_TX_HDL;
5460 map_idx = bnapi->index;
5461 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5462 if (rc)
5463 goto err_out;
5464 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5465 ring->fw_ring_id);
5466 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5467 }
5468 ring = &txr->tx_ring_struct;
5469 map_idx = i;
697197e5 5470 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5471 if (rc)
5472 goto err_out;
697197e5 5473 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
c0c050c5
MC
5474 }
5475
697197e5 5476 type = HWRM_RING_ALLOC_RX;
edd0c2cc 5477 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5478 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5479 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3e08b184
MC
5480 struct bnxt_napi *bnapi = rxr->bnapi;
5481 u32 map_idx = bnapi->index;
c0c050c5 5482
697197e5 5483 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5484 if (rc)
5485 goto err_out;
697197e5 5486 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
e8f267b0
MC
5487 /* If we have agg rings, post agg buffers first. */
5488 if (!agg_rings)
5489 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5490 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3e08b184
MC
5491 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5492 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5493 u32 type2 = HWRM_RING_ALLOC_CMPL;
5494 struct bnxt_cp_ring_info *cpr2;
5495
5496 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5497 ring = &cpr2->cp_ring_struct;
5498 ring->handle = BNXT_RX_HDL;
5499 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5500 if (rc)
5501 goto err_out;
5502 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5503 ring->fw_ring_id);
5504 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5505 }
c0c050c5
MC
5506 }
5507
e8f267b0 5508 if (agg_rings) {
697197e5 5509 type = HWRM_RING_ALLOC_AGG;
c0c050c5 5510 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5511 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
5512 struct bnxt_ring_struct *ring =
5513 &rxr->rx_agg_ring_struct;
9899bb59 5514 u32 grp_idx = ring->grp_idx;
b81a90d3 5515 u32 map_idx = grp_idx + bp->rx_nr_rings;
c0c050c5 5516
697197e5 5517 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
c0c050c5
MC
5518 if (rc)
5519 goto err_out;
5520
697197e5
MC
5521 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5522 ring->fw_ring_id);
5523 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 5524 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5525 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
5526 }
5527 }
5528err_out:
5529 return rc;
5530}
5531
5532static int hwrm_ring_free_send_msg(struct bnxt *bp,
5533 struct bnxt_ring_struct *ring,
5534 u32 ring_type, int cmpl_ring_id)
5535{
5536 int rc;
5537 struct hwrm_ring_free_input req = {0};
5538 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5539 u16 error_code;
5540
b4fff207
MC
5541 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
5542 return 0;
5543
74608fc9 5544 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
c0c050c5
MC
5545 req.ring_type = ring_type;
5546 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5547
5548 mutex_lock(&bp->hwrm_cmd_lock);
5549 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5550 error_code = le16_to_cpu(resp->error_code);
5551 mutex_unlock(&bp->hwrm_cmd_lock);
5552
5553 if (rc || error_code) {
2727c888
MC
5554 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5555 ring_type, rc, error_code);
5556 return -EIO;
c0c050c5
MC
5557 }
5558 return 0;
5559}
5560
edd0c2cc 5561static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 5562{
23aefdd7 5563 u32 type;
edd0c2cc 5564 int i;
c0c050c5
MC
5565
5566 if (!bp->bnapi)
edd0c2cc 5567 return;
c0c050c5 5568
edd0c2cc 5569 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5570 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 5571 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
edd0c2cc
MC
5572
5573 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5574 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5575
edd0c2cc
MC
5576 hwrm_ring_free_send_msg(bp, ring,
5577 RING_FREE_REQ_RING_TYPE_TX,
5578 close_path ? cmpl_ring_id :
5579 INVALID_HW_RING_ID);
5580 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5581 }
5582 }
5583
edd0c2cc 5584 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5585 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5586 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3 5587 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5588
5589 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5590 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5591
edd0c2cc
MC
5592 hwrm_ring_free_send_msg(bp, ring,
5593 RING_FREE_REQ_RING_TYPE_RX,
5594 close_path ? cmpl_ring_id :
5595 INVALID_HW_RING_ID);
5596 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5597 bp->grp_info[grp_idx].rx_fw_ring_id =
5598 INVALID_HW_RING_ID;
c0c050c5
MC
5599 }
5600 }
5601
23aefdd7
MC
5602 if (bp->flags & BNXT_FLAG_CHIP_P5)
5603 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5604 else
5605 type = RING_FREE_REQ_RING_TYPE_RX;
edd0c2cc 5606 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5607 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5608 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
b81a90d3 5609 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5610
5611 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5612 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5613
23aefdd7 5614 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5615 close_path ? cmpl_ring_id :
5616 INVALID_HW_RING_ID);
5617 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5618 bp->grp_info[grp_idx].agg_fw_ring_id =
5619 INVALID_HW_RING_ID;
c0c050c5
MC
5620 }
5621 }
5622
9d8bc097
MC
5623 /* The completion rings are about to be freed. After that the
5624 * IRQ doorbell will not work anymore. So we need to disable
5625 * IRQ here.
5626 */
5627 bnxt_disable_int_sync(bp);
5628
23aefdd7
MC
5629 if (bp->flags & BNXT_FLAG_CHIP_P5)
5630 type = RING_FREE_REQ_RING_TYPE_NQ;
5631 else
5632 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
edd0c2cc
MC
5633 for (i = 0; i < bp->cp_nr_rings; i++) {
5634 struct bnxt_napi *bnapi = bp->bnapi[i];
5635 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3e08b184
MC
5636 struct bnxt_ring_struct *ring;
5637 int j;
edd0c2cc 5638
3e08b184
MC
5639 for (j = 0; j < 2; j++) {
5640 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5641
5642 if (cpr2) {
5643 ring = &cpr2->cp_ring_struct;
5644 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5645 continue;
5646 hwrm_ring_free_send_msg(bp, ring,
5647 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5648 INVALID_HW_RING_ID);
5649 ring->fw_ring_id = INVALID_HW_RING_ID;
5650 }
5651 }
5652 ring = &cpr->cp_ring_struct;
edd0c2cc 5653 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
23aefdd7 5654 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5655 INVALID_HW_RING_ID);
5656 ring->fw_ring_id = INVALID_HW_RING_ID;
5657 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5658 }
5659 }
c0c050c5
MC
5660}
5661
41e8d798
MC
5662static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5663 bool shared);
5664
674f50a5
MC
5665static int bnxt_hwrm_get_rings(struct bnxt *bp)
5666{
5667 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5668 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5669 struct hwrm_func_qcfg_input req = {0};
5670 int rc;
5671
5672 if (bp->hwrm_spec_code < 0x10601)
5673 return 0;
5674
5675 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5676 req.fid = cpu_to_le16(0xffff);
5677 mutex_lock(&bp->hwrm_cmd_lock);
5678 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5679 if (rc) {
5680 mutex_unlock(&bp->hwrm_cmd_lock);
d4f1420d 5681 return rc;
674f50a5
MC
5682 }
5683
5684 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
f1ca94de 5685 if (BNXT_NEW_RM(bp)) {
674f50a5
MC
5686 u16 cp, stats;
5687
5688 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5689 hw_resc->resv_hw_ring_grps =
5690 le32_to_cpu(resp->alloc_hw_ring_grps);
5691 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5692 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5693 stats = le16_to_cpu(resp->alloc_stat_ctx);
75720e63 5694 hw_resc->resv_irqs = cp;
41e8d798
MC
5695 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5696 int rx = hw_resc->resv_rx_rings;
5697 int tx = hw_resc->resv_tx_rings;
5698
5699 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5700 rx >>= 1;
5701 if (cp < (rx + tx)) {
5702 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5703 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5704 rx <<= 1;
5705 hw_resc->resv_rx_rings = rx;
5706 hw_resc->resv_tx_rings = tx;
5707 }
75720e63 5708 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
41e8d798
MC
5709 hw_resc->resv_hw_ring_grps = rx;
5710 }
674f50a5 5711 hw_resc->resv_cp_rings = cp;
780baad4 5712 hw_resc->resv_stat_ctxs = stats;
674f50a5
MC
5713 }
5714 mutex_unlock(&bp->hwrm_cmd_lock);
5715 return 0;
5716}
5717
391be5c2
MC
5718/* Caller must hold bp->hwrm_cmd_lock */
5719int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5720{
5721 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5722 struct hwrm_func_qcfg_input req = {0};
5723 int rc;
5724
5725 if (bp->hwrm_spec_code < 0x10601)
5726 return 0;
5727
5728 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5729 req.fid = cpu_to_le16(fid);
5730 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5731 if (!rc)
5732 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5733
5734 return rc;
5735}
5736
41e8d798
MC
5737static bool bnxt_rfs_supported(struct bnxt *bp);
5738
4ed50ef4
MC
5739static void
5740__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5741 int tx_rings, int rx_rings, int ring_grps,
780baad4 5742 int cp_rings, int stats, int vnics)
391be5c2 5743{
674f50a5 5744 u32 enables = 0;
391be5c2 5745
4ed50ef4
MC
5746 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5747 req->fid = cpu_to_le16(0xffff);
674f50a5 5748 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4ed50ef4 5749 req->num_tx_rings = cpu_to_le16(tx_rings);
f1ca94de 5750 if (BNXT_NEW_RM(bp)) {
674f50a5 5751 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
3f93cd3f 5752 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
5753 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5754 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5755 enables |= tx_rings + ring_grps ?
3f93cd3f 5756 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
5757 enables |= rx_rings ?
5758 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5759 } else {
5760 enables |= cp_rings ?
3f93cd3f 5761 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
5762 enables |= ring_grps ?
5763 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5764 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5765 }
dbe80d44 5766 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
674f50a5 5767
4ed50ef4 5768 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
5769 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5770 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5771 req->num_msix = cpu_to_le16(cp_rings);
5772 req->num_rsscos_ctxs =
5773 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5774 } else {
5775 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5776 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5777 req->num_rsscos_ctxs = cpu_to_le16(1);
5778 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5779 bnxt_rfs_supported(bp))
5780 req->num_rsscos_ctxs =
5781 cpu_to_le16(ring_grps + 1);
5782 }
780baad4 5783 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4 5784 req->num_vnics = cpu_to_le16(vnics);
674f50a5 5785 }
4ed50ef4
MC
5786 req->enables = cpu_to_le32(enables);
5787}
5788
5789static void
5790__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5791 struct hwrm_func_vf_cfg_input *req, int tx_rings,
5792 int rx_rings, int ring_grps, int cp_rings,
780baad4 5793 int stats, int vnics)
4ed50ef4
MC
5794{
5795 u32 enables = 0;
5796
5797 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5798 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
41e8d798
MC
5799 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5800 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
3f93cd3f 5801 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
5802 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5803 enables |= tx_rings + ring_grps ?
3f93cd3f 5804 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
5805 } else {
5806 enables |= cp_rings ?
3f93cd3f 5807 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
5808 enables |= ring_grps ?
5809 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5810 }
4ed50ef4 5811 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
41e8d798 5812 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
4ed50ef4 5813
41e8d798 5814 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
4ed50ef4
MC
5815 req->num_tx_rings = cpu_to_le16(tx_rings);
5816 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
5817 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5818 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5819 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5820 } else {
5821 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5822 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5823 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5824 }
780baad4 5825 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4
MC
5826 req->num_vnics = cpu_to_le16(vnics);
5827
5828 req->enables = cpu_to_le32(enables);
5829}
5830
5831static int
5832bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 5833 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4
MC
5834{
5835 struct hwrm_func_cfg_input req = {0};
5836 int rc;
5837
5838 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5839 cp_rings, stats, vnics);
4ed50ef4 5840 if (!req.enables)
391be5c2
MC
5841 return 0;
5842
674f50a5
MC
5843 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5844 if (rc)
d4f1420d 5845 return rc;
674f50a5
MC
5846
5847 if (bp->hwrm_spec_code < 0x10601)
5848 bp->hw_resc.resv_tx_rings = tx_rings;
5849
5850 rc = bnxt_hwrm_get_rings(bp);
5851 return rc;
5852}
5853
5854static int
5855bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 5856 int ring_grps, int cp_rings, int stats, int vnics)
674f50a5
MC
5857{
5858 struct hwrm_func_vf_cfg_input req = {0};
674f50a5
MC
5859 int rc;
5860
f1ca94de 5861 if (!BNXT_NEW_RM(bp)) {
674f50a5 5862 bp->hw_resc.resv_tx_rings = tx_rings;
391be5c2 5863 return 0;
674f50a5 5864 }
391be5c2 5865
4ed50ef4 5866 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5867 cp_rings, stats, vnics);
391be5c2 5868 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
674f50a5 5869 if (rc)
d4f1420d 5870 return rc;
674f50a5
MC
5871
5872 rc = bnxt_hwrm_get_rings(bp);
5873 return rc;
5874}
5875
5876static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
780baad4 5877 int cp, int stat, int vnic)
674f50a5
MC
5878{
5879 if (BNXT_PF(bp))
780baad4
VV
5880 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5881 vnic);
674f50a5 5882 else
780baad4
VV
5883 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5884 vnic);
674f50a5
MC
5885}
5886
b16b6891 5887int bnxt_nq_rings_in_use(struct bnxt *bp)
08654eb2
MC
5888{
5889 int cp = bp->cp_nr_rings;
5890 int ulp_msix, ulp_base;
5891
5892 ulp_msix = bnxt_get_ulp_msix_num(bp);
5893 if (ulp_msix) {
5894 ulp_base = bnxt_get_ulp_msix_base(bp);
5895 cp += ulp_msix;
5896 if ((ulp_base + ulp_msix) > cp)
5897 cp = ulp_base + ulp_msix;
5898 }
5899 return cp;
5900}
5901
c0b8cda0
MC
5902static int bnxt_cp_rings_in_use(struct bnxt *bp)
5903{
5904 int cp;
5905
5906 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5907 return bnxt_nq_rings_in_use(bp);
5908
5909 cp = bp->tx_nr_rings + bp->rx_nr_rings;
5910 return cp;
5911}
5912
780baad4
VV
5913static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5914{
d77b1ad8
MC
5915 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
5916 int cp = bp->cp_nr_rings;
5917
5918 if (!ulp_stat)
5919 return cp;
5920
5921 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
5922 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
5923
5924 return cp + ulp_stat;
780baad4
VV
5925}
5926
4e41dc5d
MC
5927static bool bnxt_need_reserve_rings(struct bnxt *bp)
5928{
5929 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
fbcfc8e4 5930 int cp = bnxt_cp_rings_in_use(bp);
c0b8cda0 5931 int nq = bnxt_nq_rings_in_use(bp);
780baad4 5932 int rx = bp->rx_nr_rings, stat;
4e41dc5d
MC
5933 int vnic = 1, grp = rx;
5934
5935 if (bp->hwrm_spec_code < 0x10601)
5936 return false;
5937
5938 if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5939 return true;
5940
41e8d798 5941 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
4e41dc5d
MC
5942 vnic = rx + 1;
5943 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5944 rx <<= 1;
780baad4 5945 stat = bnxt_get_func_stat_ctxs(bp);
f1ca94de 5946 if (BNXT_NEW_RM(bp) &&
4e41dc5d 5947 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
01989c6b 5948 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
41e8d798
MC
5949 (hw_resc->resv_hw_ring_grps != grp &&
5950 !(bp->flags & BNXT_FLAG_CHIP_P5))))
4e41dc5d 5951 return true;
01989c6b
MC
5952 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
5953 hw_resc->resv_irqs != nq)
5954 return true;
4e41dc5d
MC
5955 return false;
5956}
5957
674f50a5
MC
5958static int __bnxt_reserve_rings(struct bnxt *bp)
5959{
5960 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
c0b8cda0 5961 int cp = bnxt_nq_rings_in_use(bp);
674f50a5
MC
5962 int tx = bp->tx_nr_rings;
5963 int rx = bp->rx_nr_rings;
674f50a5 5964 int grp, rx_rings, rc;
780baad4 5965 int vnic = 1, stat;
674f50a5 5966 bool sh = false;
674f50a5 5967
4e41dc5d 5968 if (!bnxt_need_reserve_rings(bp))
674f50a5
MC
5969 return 0;
5970
5971 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5972 sh = true;
41e8d798 5973 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
674f50a5
MC
5974 vnic = rx + 1;
5975 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5976 rx <<= 1;
674f50a5 5977 grp = bp->rx_nr_rings;
780baad4 5978 stat = bnxt_get_func_stat_ctxs(bp);
674f50a5 5979
780baad4 5980 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
391be5c2
MC
5981 if (rc)
5982 return rc;
5983
674f50a5 5984 tx = hw_resc->resv_tx_rings;
f1ca94de 5985 if (BNXT_NEW_RM(bp)) {
674f50a5 5986 rx = hw_resc->resv_rx_rings;
c0b8cda0 5987 cp = hw_resc->resv_irqs;
674f50a5
MC
5988 grp = hw_resc->resv_hw_ring_grps;
5989 vnic = hw_resc->resv_vnics;
780baad4 5990 stat = hw_resc->resv_stat_ctxs;
674f50a5
MC
5991 }
5992
5993 rx_rings = rx;
5994 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5995 if (rx >= 2) {
5996 rx_rings = rx >> 1;
5997 } else {
5998 if (netif_running(bp->dev))
5999 return -ENOMEM;
6000
6001 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6002 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6003 bp->dev->hw_features &= ~NETIF_F_LRO;
6004 bp->dev->features &= ~NETIF_F_LRO;
6005 bnxt_set_ring_params(bp);
6006 }
6007 }
6008 rx_rings = min_t(int, rx_rings, grp);
780baad4
VV
6009 cp = min_t(int, cp, bp->cp_nr_rings);
6010 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6011 stat -= bnxt_get_ulp_stat_ctxs(bp);
6012 cp = min_t(int, cp, stat);
674f50a5
MC
6013 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6014 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6015 rx = rx_rings << 1;
6016 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6017 bp->tx_nr_rings = tx;
6018 bp->rx_nr_rings = rx_rings;
6019 bp->cp_nr_rings = cp;
6020
780baad4 6021 if (!tx || !rx || !cp || !grp || !vnic || !stat)
674f50a5
MC
6022 return -ENOMEM;
6023
391be5c2
MC
6024 return rc;
6025}
6026
8f23d638 6027static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6028 int ring_grps, int cp_rings, int stats,
6029 int vnics)
98fdbe73 6030{
8f23d638 6031 struct hwrm_func_vf_cfg_input req = {0};
6fc2ffdf 6032 u32 flags;
98fdbe73
MC
6033 int rc;
6034
f1ca94de 6035 if (!BNXT_NEW_RM(bp))
98fdbe73
MC
6036 return 0;
6037
6fc2ffdf 6038 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 6039 cp_rings, stats, vnics);
8f23d638
MC
6040 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6041 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6042 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638 6043 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
41e8d798
MC
6044 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6045 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6046 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6047 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8f23d638
MC
6048
6049 req.flags = cpu_to_le32(flags);
8f23d638 6050 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
d4f1420d 6051 return rc;
8f23d638
MC
6052}
6053
6054static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6055 int ring_grps, int cp_rings, int stats,
6056 int vnics)
8f23d638
MC
6057{
6058 struct hwrm_func_cfg_input req = {0};
6fc2ffdf 6059 u32 flags;
8f23d638 6060 int rc;
98fdbe73 6061
6fc2ffdf 6062 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 6063 cp_rings, stats, vnics);
8f23d638 6064 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
41e8d798 6065 if (BNXT_NEW_RM(bp)) {
8f23d638
MC
6066 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6067 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638
MC
6068 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6069 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
41e8d798 6070 if (bp->flags & BNXT_FLAG_CHIP_P5)
0b815023
MC
6071 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6072 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
41e8d798
MC
6073 else
6074 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6075 }
6fc2ffdf 6076
8f23d638 6077 req.flags = cpu_to_le32(flags);
98fdbe73 6078 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
d4f1420d 6079 return rc;
98fdbe73
MC
6080}
6081
8f23d638 6082static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6083 int ring_grps, int cp_rings, int stats,
6084 int vnics)
8f23d638
MC
6085{
6086 if (bp->hwrm_spec_code < 0x10801)
6087 return 0;
6088
6089 if (BNXT_PF(bp))
6090 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
780baad4
VV
6091 ring_grps, cp_rings, stats,
6092 vnics);
8f23d638
MC
6093
6094 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
780baad4 6095 cp_rings, stats, vnics);
8f23d638
MC
6096}
6097
74706afa
MC
6098static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6099{
6100 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6101 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6102 struct hwrm_ring_aggint_qcaps_input req = {0};
6103 int rc;
6104
6105 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6106 coal_cap->num_cmpl_dma_aggr_max = 63;
6107 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6108 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6109 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6110 coal_cap->int_lat_tmr_min_max = 65535;
6111 coal_cap->int_lat_tmr_max_max = 65535;
6112 coal_cap->num_cmpl_aggr_int_max = 65535;
6113 coal_cap->timer_units = 80;
6114
6115 if (bp->hwrm_spec_code < 0x10902)
6116 return;
6117
6118 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6119 mutex_lock(&bp->hwrm_cmd_lock);
6120 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6121 if (!rc) {
6122 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
58590c8d 6123 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
74706afa
MC
6124 coal_cap->num_cmpl_dma_aggr_max =
6125 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6126 coal_cap->num_cmpl_dma_aggr_during_int_max =
6127 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6128 coal_cap->cmpl_aggr_dma_tmr_max =
6129 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6130 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6131 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6132 coal_cap->int_lat_tmr_min_max =
6133 le16_to_cpu(resp->int_lat_tmr_min_max);
6134 coal_cap->int_lat_tmr_max_max =
6135 le16_to_cpu(resp->int_lat_tmr_max_max);
6136 coal_cap->num_cmpl_aggr_int_max =
6137 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6138 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6139 }
6140 mutex_unlock(&bp->hwrm_cmd_lock);
6141}
6142
6143static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6144{
6145 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6146
6147 return usec * 1000 / coal_cap->timer_units;
6148}
6149
6150static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6151 struct bnxt_coal *hw_coal,
bb053f52
MC
6152 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6153{
74706afa
MC
6154 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6155 u32 cmpl_params = coal_cap->cmpl_params;
6156 u16 val, tmr, max, flags = 0;
f8503969
MC
6157
6158 max = hw_coal->bufs_per_record * 128;
6159 if (hw_coal->budget)
6160 max = hw_coal->bufs_per_record * hw_coal->budget;
74706afa 6161 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
f8503969
MC
6162
6163 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6164 req->num_cmpl_aggr_int = cpu_to_le16(val);
b153cbc5 6165
74706afa 6166 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
f8503969
MC
6167 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6168
74706afa
MC
6169 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6170 coal_cap->num_cmpl_dma_aggr_during_int_max);
f8503969
MC
6171 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6172
74706afa
MC
6173 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6174 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
f8503969
MC
6175 req->int_lat_tmr_max = cpu_to_le16(tmr);
6176
6177 /* min timer set to 1/2 of interrupt timer */
74706afa
MC
6178 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6179 val = tmr / 2;
6180 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6181 req->int_lat_tmr_min = cpu_to_le16(val);
6182 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6183 }
f8503969
MC
6184
6185 /* buf timer set to 1/4 of interrupt timer */
74706afa 6186 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
f8503969
MC
6187 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6188
74706afa
MC
6189 if (cmpl_params &
6190 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6191 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6192 val = clamp_t(u16, tmr, 1,
6193 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6adc4601 6194 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
74706afa
MC
6195 req->enables |=
6196 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6197 }
f8503969 6198
74706afa
MC
6199 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6200 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6201 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6202 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
f8503969 6203 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
bb053f52 6204 req->flags = cpu_to_le16(flags);
74706afa 6205 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
bb053f52
MC
6206}
6207
58590c8d
MC
6208/* Caller holds bp->hwrm_cmd_lock */
6209static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6210 struct bnxt_coal *hw_coal)
6211{
6212 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6213 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6214 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6215 u32 nq_params = coal_cap->nq_params;
6216 u16 tmr;
6217
6218 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6219 return 0;
6220
6221 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6222 -1, -1);
6223 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6224 req.flags =
6225 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6226
6227 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6228 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6229 req.int_lat_tmr_min = cpu_to_le16(tmr);
6230 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6231 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6232}
6233
6a8788f2
AG
6234int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6235{
6236 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6237 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6238 struct bnxt_coal coal;
6a8788f2
AG
6239
6240 /* Tick values in micro seconds.
6241 * 1 coal_buf x bufs_per_record = 1 completion record.
6242 */
6243 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6244
6245 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6246 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6247
6248 if (!bnapi->rx_ring)
6249 return -ENODEV;
6250
6251 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6252 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6253
74706afa 6254 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6a8788f2 6255
2c61d211 6256 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6a8788f2
AG
6257
6258 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6259 HWRM_CMD_TIMEOUT);
6260}
6261
c0c050c5
MC
6262int bnxt_hwrm_set_coal(struct bnxt *bp)
6263{
6264 int i, rc = 0;
dfc9c94a
MC
6265 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6266 req_tx = {0}, *req;
c0c050c5 6267
dfc9c94a
MC
6268 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6269 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6270 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6271 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
c0c050c5 6272
74706afa
MC
6273 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6274 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
c0c050c5
MC
6275
6276 mutex_lock(&bp->hwrm_cmd_lock);
6277 for (i = 0; i < bp->cp_nr_rings; i++) {
dfc9c94a 6278 struct bnxt_napi *bnapi = bp->bnapi[i];
58590c8d 6279 struct bnxt_coal *hw_coal;
2c61d211 6280 u16 ring_id;
c0c050c5 6281
dfc9c94a 6282 req = &req_rx;
2c61d211
MC
6283 if (!bnapi->rx_ring) {
6284 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
dfc9c94a 6285 req = &req_tx;
2c61d211
MC
6286 } else {
6287 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6288 }
6289 req->ring_id = cpu_to_le16(ring_id);
dfc9c94a
MC
6290
6291 rc = _hwrm_send_message(bp, req, sizeof(*req),
c0c050c5
MC
6292 HWRM_CMD_TIMEOUT);
6293 if (rc)
6294 break;
58590c8d
MC
6295
6296 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6297 continue;
6298
6299 if (bnapi->rx_ring && bnapi->tx_ring) {
6300 req = &req_tx;
6301 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6302 req->ring_id = cpu_to_le16(ring_id);
6303 rc = _hwrm_send_message(bp, req, sizeof(*req),
6304 HWRM_CMD_TIMEOUT);
6305 if (rc)
6306 break;
6307 }
6308 if (bnapi->rx_ring)
6309 hw_coal = &bp->rx_coal;
6310 else
6311 hw_coal = &bp->tx_coal;
6312 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
c0c050c5
MC
6313 }
6314 mutex_unlock(&bp->hwrm_cmd_lock);
6315 return rc;
6316}
6317
6318static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6319{
6320 int rc = 0, i;
6321 struct hwrm_stat_ctx_free_input req = {0};
6322
6323 if (!bp->bnapi)
6324 return 0;
6325
3e8060fa
PS
6326 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6327 return 0;
6328
c0c050c5
MC
6329 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6330
6331 mutex_lock(&bp->hwrm_cmd_lock);
6332 for (i = 0; i < bp->cp_nr_rings; i++) {
6333 struct bnxt_napi *bnapi = bp->bnapi[i];
6334 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6335
6336 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6337 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6338
6339 rc = _hwrm_send_message(bp, &req, sizeof(req),
6340 HWRM_CMD_TIMEOUT);
c0c050c5
MC
6341
6342 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6343 }
6344 }
6345 mutex_unlock(&bp->hwrm_cmd_lock);
6346 return rc;
6347}
6348
6349static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6350{
6351 int rc = 0, i;
6352 struct hwrm_stat_ctx_alloc_input req = {0};
6353 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6354
3e8060fa
PS
6355 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6356 return 0;
6357
c0c050c5
MC
6358 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6359
4e748506 6360 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
51f30785 6361 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
c0c050c5
MC
6362
6363 mutex_lock(&bp->hwrm_cmd_lock);
6364 for (i = 0; i < bp->cp_nr_rings; i++) {
6365 struct bnxt_napi *bnapi = bp->bnapi[i];
6366 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6367
6368 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
6369
6370 rc = _hwrm_send_message(bp, &req, sizeof(req),
6371 HWRM_CMD_TIMEOUT);
6372 if (rc)
6373 break;
6374
6375 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6376
6377 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6378 }
6379 mutex_unlock(&bp->hwrm_cmd_lock);
89aa8445 6380 return rc;
c0c050c5
MC
6381}
6382
cf6645f8
MC
6383static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6384{
6385 struct hwrm_func_qcfg_input req = {0};
567b2abe 6386 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9315edca 6387 u16 flags;
cf6645f8
MC
6388 int rc;
6389
6390 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6391 req.fid = cpu_to_le16(0xffff);
6392 mutex_lock(&bp->hwrm_cmd_lock);
6393 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6394 if (rc)
6395 goto func_qcfg_exit;
6396
6397#ifdef CONFIG_BNXT_SRIOV
6398 if (BNXT_VF(bp)) {
cf6645f8
MC
6399 struct bnxt_vf_info *vf = &bp->vf;
6400
6401 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
230d1f0d
MC
6402 } else {
6403 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
cf6645f8
MC
6404 }
6405#endif
9315edca
MC
6406 flags = le16_to_cpu(resp->flags);
6407 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6408 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
97381a18 6409 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
9315edca 6410 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
97381a18 6411 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
9315edca
MC
6412 }
6413 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6414 bp->flags |= BNXT_FLAG_MULTI_HOST;
bc39f885 6415
567b2abe
SB
6416 switch (resp->port_partition_type) {
6417 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6418 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6419 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6420 bp->port_partition_type = resp->port_partition_type;
6421 break;
6422 }
32e8239c
MC
6423 if (bp->hwrm_spec_code < 0x10707 ||
6424 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6425 bp->br_mode = BRIDGE_MODE_VEB;
6426 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6427 bp->br_mode = BRIDGE_MODE_VEPA;
6428 else
6429 bp->br_mode = BRIDGE_MODE_UNDEF;
cf6645f8 6430
7eb9bb3a
MC
6431 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6432 if (!bp->max_mtu)
6433 bp->max_mtu = BNXT_MAX_MTU;
6434
cf6645f8
MC
6435func_qcfg_exit:
6436 mutex_unlock(&bp->hwrm_cmd_lock);
6437 return rc;
6438}
6439
98f04cf0
MC
6440static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6441{
6442 struct hwrm_func_backing_store_qcaps_input req = {0};
6443 struct hwrm_func_backing_store_qcaps_output *resp =
6444 bp->hwrm_cmd_resp_addr;
6445 int rc;
6446
6447 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6448 return 0;
6449
6450 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6451 mutex_lock(&bp->hwrm_cmd_lock);
6452 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6453 if (!rc) {
6454 struct bnxt_ctx_pg_info *ctx_pg;
6455 struct bnxt_ctx_mem_info *ctx;
6456 int i;
6457
6458 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6459 if (!ctx) {
6460 rc = -ENOMEM;
6461 goto ctx_err;
6462 }
6463 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
6464 if (!ctx_pg) {
6465 kfree(ctx);
6466 rc = -ENOMEM;
6467 goto ctx_err;
6468 }
6469 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6470 ctx->tqm_mem[i] = ctx_pg;
6471
6472 bp->ctx = ctx;
6473 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6474 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6475 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6476 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6477 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6478 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6479 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6480 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6481 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6482 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6483 ctx->vnic_max_vnic_entries =
6484 le16_to_cpu(resp->vnic_max_vnic_entries);
6485 ctx->vnic_max_ring_table_entries =
6486 le16_to_cpu(resp->vnic_max_ring_table_entries);
6487 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6488 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6489 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6490 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6491 ctx->tqm_min_entries_per_ring =
6492 le32_to_cpu(resp->tqm_min_entries_per_ring);
6493 ctx->tqm_max_entries_per_ring =
6494 le32_to_cpu(resp->tqm_max_entries_per_ring);
6495 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6496 if (!ctx->tqm_entries_multiple)
6497 ctx->tqm_entries_multiple = 1;
6498 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6499 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
53579e37
DS
6500 ctx->mrav_num_entries_units =
6501 le16_to_cpu(resp->mrav_num_entries_units);
98f04cf0
MC
6502 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6503 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
3be8136c 6504 ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
98f04cf0
MC
6505 } else {
6506 rc = 0;
6507 }
6508ctx_err:
6509 mutex_unlock(&bp->hwrm_cmd_lock);
6510 return rc;
6511}
6512
1b9394e5
MC
6513static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6514 __le64 *pg_dir)
6515{
6516 u8 pg_size = 0;
6517
6518 if (BNXT_PAGE_SHIFT == 13)
6519 pg_size = 1 << 4;
6520 else if (BNXT_PAGE_SIZE == 16)
6521 pg_size = 2 << 4;
6522
6523 *pg_attr = pg_size;
08fe9d18
MC
6524 if (rmem->depth >= 1) {
6525 if (rmem->depth == 2)
6526 *pg_attr |= 2;
6527 else
6528 *pg_attr |= 1;
1b9394e5
MC
6529 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6530 } else {
6531 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6532 }
6533}
6534
6535#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6536 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6537 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6538 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6539 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6540 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6541
6542static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6543{
6544 struct hwrm_func_backing_store_cfg_input req = {0};
6545 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6546 struct bnxt_ctx_pg_info *ctx_pg;
6547 __le32 *num_entries;
6548 __le64 *pg_dir;
53579e37 6549 u32 flags = 0;
1b9394e5
MC
6550 u8 *pg_attr;
6551 int i, rc;
6552 u32 ena;
6553
6554 if (!ctx)
6555 return 0;
6556
6557 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6558 req.enables = cpu_to_le32(enables);
6559
6560 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6561 ctx_pg = &ctx->qp_mem;
6562 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6563 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6564 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6565 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6566 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6567 &req.qpc_pg_size_qpc_lvl,
6568 &req.qpc_page_dir);
6569 }
6570 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6571 ctx_pg = &ctx->srq_mem;
6572 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6573 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6574 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6575 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6576 &req.srq_pg_size_srq_lvl,
6577 &req.srq_page_dir);
6578 }
6579 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6580 ctx_pg = &ctx->cq_mem;
6581 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6582 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6583 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6584 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6585 &req.cq_page_dir);
6586 }
6587 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6588 ctx_pg = &ctx->vnic_mem;
6589 req.vnic_num_vnic_entries =
6590 cpu_to_le16(ctx->vnic_max_vnic_entries);
6591 req.vnic_num_ring_table_entries =
6592 cpu_to_le16(ctx->vnic_max_ring_table_entries);
6593 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6594 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6595 &req.vnic_pg_size_vnic_lvl,
6596 &req.vnic_page_dir);
6597 }
6598 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6599 ctx_pg = &ctx->stat_mem;
6600 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6601 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6602 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6603 &req.stat_pg_size_stat_lvl,
6604 &req.stat_page_dir);
6605 }
cf6daed0
MC
6606 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6607 ctx_pg = &ctx->mrav_mem;
6608 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
53579e37
DS
6609 if (ctx->mrav_num_entries_units)
6610 flags |=
6611 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
cf6daed0
MC
6612 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6613 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6614 &req.mrav_pg_size_mrav_lvl,
6615 &req.mrav_page_dir);
6616 }
6617 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6618 ctx_pg = &ctx->tim_mem;
6619 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6620 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6621 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6622 &req.tim_pg_size_tim_lvl,
6623 &req.tim_page_dir);
6624 }
1b9394e5
MC
6625 for (i = 0, num_entries = &req.tqm_sp_num_entries,
6626 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6627 pg_dir = &req.tqm_sp_page_dir,
6628 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6629 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6630 if (!(enables & ena))
6631 continue;
6632
6633 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6634 ctx_pg = ctx->tqm_mem[i];
6635 *num_entries = cpu_to_le32(ctx_pg->entries);
6636 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6637 }
53579e37 6638 req.flags = cpu_to_le32(flags);
1b9394e5 6639 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1b9394e5
MC
6640 return rc;
6641}
6642
98f04cf0 6643static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
08fe9d18 6644 struct bnxt_ctx_pg_info *ctx_pg)
98f04cf0
MC
6645{
6646 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6647
98f04cf0
MC
6648 rmem->page_size = BNXT_PAGE_SIZE;
6649 rmem->pg_arr = ctx_pg->ctx_pg_arr;
6650 rmem->dma_arr = ctx_pg->ctx_dma_arr;
1b9394e5 6651 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
08fe9d18
MC
6652 if (rmem->depth >= 1)
6653 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
98f04cf0
MC
6654 return bnxt_alloc_ring(bp, rmem);
6655}
6656
08fe9d18
MC
6657static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6658 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
3be8136c 6659 u8 depth, bool use_init_val)
08fe9d18
MC
6660{
6661 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6662 int rc;
6663
6664 if (!mem_size)
6665 return 0;
6666
6667 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6668 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6669 ctx_pg->nr_pages = 0;
6670 return -EINVAL;
6671 }
6672 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6673 int nr_tbls, i;
6674
6675 rmem->depth = 2;
6676 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6677 GFP_KERNEL);
6678 if (!ctx_pg->ctx_pg_tbl)
6679 return -ENOMEM;
6680 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6681 rmem->nr_pages = nr_tbls;
6682 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6683 if (rc)
6684 return rc;
6685 for (i = 0; i < nr_tbls; i++) {
6686 struct bnxt_ctx_pg_info *pg_tbl;
6687
6688 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6689 if (!pg_tbl)
6690 return -ENOMEM;
6691 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6692 rmem = &pg_tbl->ring_mem;
6693 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6694 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6695 rmem->depth = 1;
6696 rmem->nr_pages = MAX_CTX_PAGES;
3be8136c
MC
6697 if (use_init_val)
6698 rmem->init_val = bp->ctx->ctx_kind_initializer;
6ef982de
MC
6699 if (i == (nr_tbls - 1)) {
6700 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6701
6702 if (rem)
6703 rmem->nr_pages = rem;
6704 }
08fe9d18
MC
6705 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6706 if (rc)
6707 break;
6708 }
6709 } else {
6710 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6711 if (rmem->nr_pages > 1 || depth)
6712 rmem->depth = 1;
3be8136c
MC
6713 if (use_init_val)
6714 rmem->init_val = bp->ctx->ctx_kind_initializer;
08fe9d18
MC
6715 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6716 }
6717 return rc;
6718}
6719
6720static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6721 struct bnxt_ctx_pg_info *ctx_pg)
6722{
6723 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6724
6725 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6726 ctx_pg->ctx_pg_tbl) {
6727 int i, nr_tbls = rmem->nr_pages;
6728
6729 for (i = 0; i < nr_tbls; i++) {
6730 struct bnxt_ctx_pg_info *pg_tbl;
6731 struct bnxt_ring_mem_info *rmem2;
6732
6733 pg_tbl = ctx_pg->ctx_pg_tbl[i];
6734 if (!pg_tbl)
6735 continue;
6736 rmem2 = &pg_tbl->ring_mem;
6737 bnxt_free_ring(bp, rmem2);
6738 ctx_pg->ctx_pg_arr[i] = NULL;
6739 kfree(pg_tbl);
6740 ctx_pg->ctx_pg_tbl[i] = NULL;
6741 }
6742 kfree(ctx_pg->ctx_pg_tbl);
6743 ctx_pg->ctx_pg_tbl = NULL;
6744 }
6745 bnxt_free_ring(bp, rmem);
6746 ctx_pg->nr_pages = 0;
6747}
6748
98f04cf0
MC
6749static void bnxt_free_ctx_mem(struct bnxt *bp)
6750{
6751 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6752 int i;
6753
6754 if (!ctx)
6755 return;
6756
6757 if (ctx->tqm_mem[0]) {
6758 for (i = 0; i < bp->max_q + 1; i++)
08fe9d18 6759 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
98f04cf0
MC
6760 kfree(ctx->tqm_mem[0]);
6761 ctx->tqm_mem[0] = NULL;
6762 }
6763
cf6daed0
MC
6764 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6765 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
08fe9d18
MC
6766 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6767 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6768 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6769 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6770 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
98f04cf0
MC
6771 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6772}
6773
6774static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6775{
6776 struct bnxt_ctx_pg_info *ctx_pg;
6777 struct bnxt_ctx_mem_info *ctx;
1b9394e5 6778 u32 mem_size, ena, entries;
53579e37 6779 u32 num_mr, num_ah;
cf6daed0
MC
6780 u32 extra_srqs = 0;
6781 u32 extra_qps = 0;
6782 u8 pg_lvl = 1;
98f04cf0
MC
6783 int i, rc;
6784
6785 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6786 if (rc) {
6787 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6788 rc);
6789 return rc;
6790 }
6791 ctx = bp->ctx;
6792 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6793 return 0;
6794
d629522e 6795 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
cf6daed0
MC
6796 pg_lvl = 2;
6797 extra_qps = 65536;
6798 extra_srqs = 8192;
6799 }
6800
98f04cf0 6801 ctx_pg = &ctx->qp_mem;
cf6daed0
MC
6802 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6803 extra_qps;
98f04cf0 6804 mem_size = ctx->qp_entry_size * ctx_pg->entries;
3be8136c 6805 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
98f04cf0
MC
6806 if (rc)
6807 return rc;
6808
6809 ctx_pg = &ctx->srq_mem;
cf6daed0 6810 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
98f04cf0 6811 mem_size = ctx->srq_entry_size * ctx_pg->entries;
3be8136c 6812 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
98f04cf0
MC
6813 if (rc)
6814 return rc;
6815
6816 ctx_pg = &ctx->cq_mem;
cf6daed0 6817 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
98f04cf0 6818 mem_size = ctx->cq_entry_size * ctx_pg->entries;
3be8136c 6819 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
98f04cf0
MC
6820 if (rc)
6821 return rc;
6822
6823 ctx_pg = &ctx->vnic_mem;
6824 ctx_pg->entries = ctx->vnic_max_vnic_entries +
6825 ctx->vnic_max_ring_table_entries;
6826 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
3be8136c 6827 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
98f04cf0
MC
6828 if (rc)
6829 return rc;
6830
6831 ctx_pg = &ctx->stat_mem;
6832 ctx_pg->entries = ctx->stat_max_entries;
6833 mem_size = ctx->stat_entry_size * ctx_pg->entries;
3be8136c 6834 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
98f04cf0
MC
6835 if (rc)
6836 return rc;
6837
cf6daed0
MC
6838 ena = 0;
6839 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6840 goto skip_rdma;
6841
6842 ctx_pg = &ctx->mrav_mem;
53579e37
DS
6843 /* 128K extra is needed to accommodate static AH context
6844 * allocation by f/w.
6845 */
6846 num_mr = 1024 * 256;
6847 num_ah = 1024 * 128;
6848 ctx_pg->entries = num_mr + num_ah;
cf6daed0 6849 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
3be8136c 6850 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
cf6daed0
MC
6851 if (rc)
6852 return rc;
6853 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
53579e37
DS
6854 if (ctx->mrav_num_entries_units)
6855 ctx_pg->entries =
6856 ((num_mr / ctx->mrav_num_entries_units) << 16) |
6857 (num_ah / ctx->mrav_num_entries_units);
cf6daed0
MC
6858
6859 ctx_pg = &ctx->tim_mem;
6860 ctx_pg->entries = ctx->qp_mem.entries;
6861 mem_size = ctx->tim_entry_size * ctx_pg->entries;
3be8136c 6862 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
cf6daed0
MC
6863 if (rc)
6864 return rc;
6865 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6866
6867skip_rdma:
6868 entries = ctx->qp_max_l2_entries + extra_qps;
98f04cf0
MC
6869 entries = roundup(entries, ctx->tqm_entries_multiple);
6870 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6871 ctx->tqm_max_entries_per_ring);
cf6daed0 6872 for (i = 0; i < bp->max_q + 1; i++) {
98f04cf0
MC
6873 ctx_pg = ctx->tqm_mem[i];
6874 ctx_pg->entries = entries;
6875 mem_size = ctx->tqm_entry_size * entries;
3be8136c 6876 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
98f04cf0
MC
6877 if (rc)
6878 return rc;
1b9394e5 6879 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
98f04cf0 6880 }
1b9394e5
MC
6881 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6882 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6883 if (rc)
6884 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6885 rc);
6886 else
6887 ctx->flags |= BNXT_CTX_FLAG_INITED;
6888
98f04cf0
MC
6889 return 0;
6890}
6891
db4723b3 6892int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
be0dd9c4
MC
6893{
6894 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6895 struct hwrm_func_resource_qcaps_input req = {0};
6896 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6897 int rc;
6898
6899 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6900 req.fid = cpu_to_le16(0xffff);
6901
6902 mutex_lock(&bp->hwrm_cmd_lock);
351cbde9
JT
6903 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6904 HWRM_CMD_TIMEOUT);
d4f1420d 6905 if (rc)
be0dd9c4 6906 goto hwrm_func_resc_qcaps_exit;
be0dd9c4 6907
db4723b3
MC
6908 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6909 if (!all)
6910 goto hwrm_func_resc_qcaps_exit;
6911
be0dd9c4
MC
6912 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6913 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6914 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6915 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6916 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6917 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6918 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6919 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6920 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6921 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6922 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6923 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6924 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6925 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6926 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6927 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6928
9c1fabdf
MC
6929 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6930 u16 max_msix = le16_to_cpu(resp->max_msix);
6931
f7588cd8 6932 hw_resc->max_nqs = max_msix;
9c1fabdf
MC
6933 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6934 }
6935
4673d664
MC
6936 if (BNXT_PF(bp)) {
6937 struct bnxt_pf_info *pf = &bp->pf;
6938
6939 pf->vf_resv_strategy =
6940 le16_to_cpu(resp->vf_reservation_strategy);
bf82736d 6941 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
4673d664
MC
6942 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6943 }
be0dd9c4
MC
6944hwrm_func_resc_qcaps_exit:
6945 mutex_unlock(&bp->hwrm_cmd_lock);
6946 return rc;
6947}
6948
6949static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5
MC
6950{
6951 int rc = 0;
6952 struct hwrm_func_qcaps_input req = {0};
6953 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6a4f2947
MC
6954 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6955 u32 flags;
c0c050c5
MC
6956
6957 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6958 req.fid = cpu_to_le16(0xffff);
6959
6960 mutex_lock(&bp->hwrm_cmd_lock);
6961 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6962 if (rc)
6963 goto hwrm_func_qcaps_exit;
6964
6a4f2947
MC
6965 flags = le32_to_cpu(resp->flags);
6966 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
e4060d30 6967 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6a4f2947 6968 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
e4060d30 6969 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
55e4398d
VV
6970 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
6971 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
0a3f4e4f
VV
6972 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
6973 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
6154532f
VV
6974 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
6975 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
07f83d72
MC
6976 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
6977 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
4037eb71
VV
6978 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
6979 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
e4060d30 6980
7cc5a20e 6981 bp->tx_push_thresh = 0;
6a4f2947 6982 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
7cc5a20e
MC
6983 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6984
6a4f2947
MC
6985 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6986 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6987 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6988 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6989 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6990 if (!hw_resc->max_hw_ring_grps)
6991 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6992 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6993 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6994 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6995
c0c050c5
MC
6996 if (BNXT_PF(bp)) {
6997 struct bnxt_pf_info *pf = &bp->pf;
6998
6999 pf->fw_fid = le16_to_cpu(resp->fid);
7000 pf->port_id = le16_to_cpu(resp->port_id);
11f15ed3 7001 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
c0c050c5
MC
7002 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7003 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7004 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7005 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7006 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7007 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7008 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7009 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
ba642ab7 7010 bp->flags &= ~BNXT_FLAG_WOL_CAP;
6a4f2947 7011 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
c1ef146a 7012 bp->flags |= BNXT_FLAG_WOL_CAP;
c0c050c5 7013 } else {
379a80a1 7014#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
7015 struct bnxt_vf_info *vf = &bp->vf;
7016
7017 vf->fw_fid = le16_to_cpu(resp->fid);
7cc5a20e 7018 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
379a80a1 7019#endif
c0c050c5
MC
7020 }
7021
c0c050c5
MC
7022hwrm_func_qcaps_exit:
7023 mutex_unlock(&bp->hwrm_cmd_lock);
7024 return rc;
7025}
7026
804fba4e
MC
7027static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7028
be0dd9c4
MC
7029static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7030{
7031 int rc;
7032
7033 rc = __bnxt_hwrm_func_qcaps(bp);
7034 if (rc)
7035 return rc;
804fba4e
MC
7036 rc = bnxt_hwrm_queue_qportcfg(bp);
7037 if (rc) {
7038 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7039 return rc;
7040 }
be0dd9c4 7041 if (bp->hwrm_spec_code >= 0x10803) {
98f04cf0
MC
7042 rc = bnxt_alloc_ctx_mem(bp);
7043 if (rc)
7044 return rc;
db4723b3 7045 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
be0dd9c4 7046 if (!rc)
97381a18 7047 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
be0dd9c4
MC
7048 }
7049 return 0;
7050}
7051
e969ae5b
MC
7052static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7053{
7054 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7055 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7056 int rc = 0;
7057 u32 flags;
7058
7059 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7060 return 0;
7061
7062 resp = bp->hwrm_cmd_resp_addr;
7063 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7064
7065 mutex_lock(&bp->hwrm_cmd_lock);
7066 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7067 if (rc)
7068 goto hwrm_cfa_adv_qcaps_exit;
7069
7070 flags = le32_to_cpu(resp->flags);
7071 if (flags &
41136ab3
MC
7072 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7073 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
e969ae5b
MC
7074
7075hwrm_cfa_adv_qcaps_exit:
7076 mutex_unlock(&bp->hwrm_cmd_lock);
7077 return rc;
7078}
7079
9ffbd677
MC
7080static int bnxt_map_fw_health_regs(struct bnxt *bp)
7081{
7082 struct bnxt_fw_health *fw_health = bp->fw_health;
7083 u32 reg_base = 0xffffffff;
7084 int i;
7085
7086 /* Only pre-map the monitoring GRC registers using window 3 */
7087 for (i = 0; i < 4; i++) {
7088 u32 reg = fw_health->regs[i];
7089
7090 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7091 continue;
7092 if (reg_base == 0xffffffff)
7093 reg_base = reg & BNXT_GRC_BASE_MASK;
7094 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7095 return -ERANGE;
7096 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
7097 (reg & BNXT_GRC_OFFSET_MASK);
7098 }
7099 if (reg_base == 0xffffffff)
7100 return 0;
7101
7102 writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7103 BNXT_FW_HEALTH_WIN_MAP_OFF);
7104 return 0;
7105}
7106
07f83d72
MC
7107static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7108{
7109 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7110 struct bnxt_fw_health *fw_health = bp->fw_health;
7111 struct hwrm_error_recovery_qcfg_input req = {0};
7112 int rc, i;
7113
7114 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7115 return 0;
7116
7117 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7118 mutex_lock(&bp->hwrm_cmd_lock);
7119 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7120 if (rc)
7121 goto err_recovery_out;
07f83d72
MC
7122 fw_health->flags = le32_to_cpu(resp->flags);
7123 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7124 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7125 rc = -EINVAL;
7126 goto err_recovery_out;
7127 }
7128 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7129 fw_health->master_func_wait_dsecs =
7130 le32_to_cpu(resp->master_func_wait_period);
7131 fw_health->normal_func_wait_dsecs =
7132 le32_to_cpu(resp->normal_func_wait_period);
7133 fw_health->post_reset_wait_dsecs =
7134 le32_to_cpu(resp->master_func_wait_period_after_reset);
7135 fw_health->post_reset_max_wait_dsecs =
7136 le32_to_cpu(resp->max_bailout_time_after_reset);
7137 fw_health->regs[BNXT_FW_HEALTH_REG] =
7138 le32_to_cpu(resp->fw_health_status_reg);
7139 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7140 le32_to_cpu(resp->fw_heartbeat_reg);
7141 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7142 le32_to_cpu(resp->fw_reset_cnt_reg);
7143 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7144 le32_to_cpu(resp->reset_inprogress_reg);
7145 fw_health->fw_reset_inprog_reg_mask =
7146 le32_to_cpu(resp->reset_inprogress_reg_mask);
7147 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7148 if (fw_health->fw_reset_seq_cnt >= 16) {
7149 rc = -EINVAL;
7150 goto err_recovery_out;
7151 }
7152 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7153 fw_health->fw_reset_seq_regs[i] =
7154 le32_to_cpu(resp->reset_reg[i]);
7155 fw_health->fw_reset_seq_vals[i] =
7156 le32_to_cpu(resp->reset_reg_val[i]);
7157 fw_health->fw_reset_seq_delay_msec[i] =
7158 resp->delay_after_reset[i];
7159 }
7160err_recovery_out:
7161 mutex_unlock(&bp->hwrm_cmd_lock);
9ffbd677
MC
7162 if (!rc)
7163 rc = bnxt_map_fw_health_regs(bp);
07f83d72
MC
7164 if (rc)
7165 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7166 return rc;
7167}
7168
c0c050c5
MC
7169static int bnxt_hwrm_func_reset(struct bnxt *bp)
7170{
7171 struct hwrm_func_reset_input req = {0};
7172
7173 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7174 req.enables = 0;
7175
7176 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7177}
7178
7179static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7180{
7181 int rc = 0;
7182 struct hwrm_queue_qportcfg_input req = {0};
7183 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
aabfc016
MC
7184 u8 i, j, *qptr;
7185 bool no_rdma;
c0c050c5
MC
7186
7187 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7188
7189 mutex_lock(&bp->hwrm_cmd_lock);
7190 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7191 if (rc)
7192 goto qportcfg_exit;
7193
7194 if (!resp->max_configurable_queues) {
7195 rc = -EINVAL;
7196 goto qportcfg_exit;
7197 }
7198 bp->max_tc = resp->max_configurable_queues;
87c374de 7199 bp->max_lltc = resp->max_configurable_lossless_queues;
c0c050c5
MC
7200 if (bp->max_tc > BNXT_MAX_QUEUE)
7201 bp->max_tc = BNXT_MAX_QUEUE;
7202
aabfc016
MC
7203 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7204 qptr = &resp->queue_id0;
7205 for (i = 0, j = 0; i < bp->max_tc; i++) {
98f04cf0
MC
7206 bp->q_info[j].queue_id = *qptr;
7207 bp->q_ids[i] = *qptr++;
aabfc016
MC
7208 bp->q_info[j].queue_profile = *qptr++;
7209 bp->tc_to_qidx[j] = j;
7210 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7211 (no_rdma && BNXT_PF(bp)))
7212 j++;
7213 }
98f04cf0 7214 bp->max_q = bp->max_tc;
aabfc016
MC
7215 bp->max_tc = max_t(u8, j, 1);
7216
441cabbb
MC
7217 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7218 bp->max_tc = 1;
7219
87c374de
MC
7220 if (bp->max_lltc > bp->max_tc)
7221 bp->max_lltc = bp->max_tc;
7222
c0c050c5
MC
7223qportcfg_exit:
7224 mutex_unlock(&bp->hwrm_cmd_lock);
7225 return rc;
7226}
7227
ba642ab7 7228static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
c0c050c5 7229{
c0c050c5 7230 struct hwrm_ver_get_input req = {0};
ba642ab7 7231 int rc;
c0c050c5
MC
7232
7233 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7234 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7235 req.hwrm_intf_min = HWRM_VERSION_MINOR;
7236 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
ba642ab7
MC
7237
7238 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7239 silent);
7240 return rc;
7241}
7242
7243static int bnxt_hwrm_ver_get(struct bnxt *bp)
7244{
7245 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7246 u32 dev_caps_cfg;
7247 int rc;
7248
7249 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
c0c050c5 7250 mutex_lock(&bp->hwrm_cmd_lock);
ba642ab7 7251 rc = __bnxt_hwrm_ver_get(bp, false);
c0c050c5
MC
7252 if (rc)
7253 goto hwrm_ver_get_exit;
7254
7255 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7256
894aa69a
MC
7257 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7258 resp->hwrm_intf_min_8b << 8 |
7259 resp->hwrm_intf_upd_8b;
7260 if (resp->hwrm_intf_maj_8b < 1) {
c193554e 7261 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
894aa69a
MC
7262 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7263 resp->hwrm_intf_upd_8b);
c193554e 7264 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5 7265 }
431aa1eb 7266 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
894aa69a
MC
7267 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
7268 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
c0c050c5 7269
691aa620
VV
7270 if (strlen(resp->active_pkg_name)) {
7271 int fw_ver_len = strlen(bp->fw_ver_str);
7272
7273 snprintf(bp->fw_ver_str + fw_ver_len,
7274 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7275 resp->active_pkg_name);
7276 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7277 }
7278
ff4fe81d
MC
7279 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7280 if (!bp->hwrm_cmd_timeout)
7281 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7282
1dfddc41 7283 if (resp->hwrm_intf_maj_8b >= 1) {
e6ef2699 7284 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
1dfddc41
MC
7285 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7286 }
7287 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7288 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
e6ef2699 7289
659c805c 7290 bp->chip_num = le16_to_cpu(resp->chip_num);
5313845f 7291 bp->chip_rev = resp->chip_rev;
3e8060fa
PS
7292 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7293 !resp->chip_metal)
7294 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
659c805c 7295
e605db80
DK
7296 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7297 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7298 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
97381a18 7299 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
e605db80 7300
760b6d33
VD
7301 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7302 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7303
abd43a13
VD
7304 if (dev_caps_cfg &
7305 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7306 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7307
2a516444
MC
7308 if (dev_caps_cfg &
7309 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7310 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7311
e969ae5b
MC
7312 if (dev_caps_cfg &
7313 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7314 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7315
c0c050c5
MC
7316hwrm_ver_get_exit:
7317 mutex_unlock(&bp->hwrm_cmd_lock);
7318 return rc;
7319}
7320
5ac67d8b
RS
7321int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7322{
7323 struct hwrm_fw_set_time_input req = {0};
7dfaa7bc
AB
7324 struct tm tm;
7325 time64_t now = ktime_get_real_seconds();
5ac67d8b 7326
ca2c39e2
MC
7327 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7328 bp->hwrm_spec_code < 0x10400)
5ac67d8b
RS
7329 return -EOPNOTSUPP;
7330
7dfaa7bc 7331 time64_to_tm(now, 0, &tm);
5ac67d8b
RS
7332 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7333 req.year = cpu_to_le16(1900 + tm.tm_year);
7334 req.month = 1 + tm.tm_mon;
7335 req.day = tm.tm_mday;
7336 req.hour = tm.tm_hour;
7337 req.minute = tm.tm_min;
7338 req.second = tm.tm_sec;
7339 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7340}
7341
3bdf56c4
MC
7342static int bnxt_hwrm_port_qstats(struct bnxt *bp)
7343{
7344 int rc;
7345 struct bnxt_pf_info *pf = &bp->pf;
7346 struct hwrm_port_qstats_input req = {0};
7347
7348 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7349 return 0;
7350
7351 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7352 req.port_id = cpu_to_le16(pf->port_id);
7353 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
7354 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
7355 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7356 return rc;
7357}
7358
00db3cba
VV
7359static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
7360{
36e53349 7361 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
e37fed79 7362 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
00db3cba
VV
7363 struct hwrm_port_qstats_ext_input req = {0};
7364 struct bnxt_pf_info *pf = &bp->pf;
ad361adf 7365 u32 tx_stat_size;
36e53349 7366 int rc;
00db3cba
VV
7367
7368 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7369 return 0;
7370
7371 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7372 req.port_id = cpu_to_le16(pf->port_id);
7373 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7374 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
ad361adf
MC
7375 tx_stat_size = bp->hw_tx_port_stats_ext ?
7376 sizeof(*bp->hw_tx_port_stats_ext) : 0;
7377 req.tx_stat_size = cpu_to_le16(tx_stat_size);
36e53349
MC
7378 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
7379 mutex_lock(&bp->hwrm_cmd_lock);
7380 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7381 if (!rc) {
7382 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
ad361adf
MC
7383 bp->fw_tx_stats_ext_size = tx_stat_size ?
7384 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
36e53349
MC
7385 } else {
7386 bp->fw_rx_stats_ext_size = 0;
7387 bp->fw_tx_stats_ext_size = 0;
7388 }
e37fed79
MC
7389 if (bp->fw_tx_stats_ext_size <=
7390 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7391 mutex_unlock(&bp->hwrm_cmd_lock);
7392 bp->pri2cos_valid = 0;
7393 return rc;
7394 }
7395
7396 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
7397 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
7398
7399 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
7400 if (!rc) {
7401 struct hwrm_queue_pri2cos_qcfg_output *resp2;
7402 u8 *pri2cos;
7403 int i, j;
7404
7405 resp2 = bp->hwrm_cmd_resp_addr;
7406 pri2cos = &resp2->pri0_cos_queue_id;
7407 for (i = 0; i < 8; i++) {
7408 u8 queue_id = pri2cos[i];
a24ec322 7409 u8 queue_idx;
e37fed79 7410
a24ec322
MC
7411 /* Per port queue IDs start from 0, 10, 20, etc */
7412 queue_idx = queue_id % 10;
7413 if (queue_idx > BNXT_MAX_QUEUE) {
7414 bp->pri2cos_valid = false;
7415 goto qstats_done;
7416 }
e37fed79
MC
7417 for (j = 0; j < bp->max_q; j++) {
7418 if (bp->q_ids[j] == queue_id)
a24ec322 7419 bp->pri2cos_idx[i] = queue_idx;
e37fed79
MC
7420 }
7421 }
7422 bp->pri2cos_valid = 1;
7423 }
a24ec322 7424qstats_done:
36e53349
MC
7425 mutex_unlock(&bp->hwrm_cmd_lock);
7426 return rc;
00db3cba
VV
7427}
7428
55e4398d
VV
7429static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
7430{
7431 struct hwrm_pcie_qstats_input req = {0};
7432
7433 if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
7434 return 0;
7435
7436 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
7437 req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
7438 req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
7439 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7440}
7441
c0c050c5
MC
7442static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
7443{
7444 if (bp->vxlan_port_cnt) {
7445 bnxt_hwrm_tunnel_dst_port_free(
7446 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7447 }
7448 bp->vxlan_port_cnt = 0;
7449 if (bp->nge_port_cnt) {
7450 bnxt_hwrm_tunnel_dst_port_free(
7451 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7452 }
7453 bp->nge_port_cnt = 0;
7454}
7455
7456static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
7457{
7458 int rc, i;
7459 u32 tpa_flags = 0;
7460
7461 if (set_tpa)
7462 tpa_flags = bp->flags & BNXT_FLAG_TPA;
b4fff207
MC
7463 else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
7464 return 0;
c0c050c5
MC
7465 for (i = 0; i < bp->nr_vnics; i++) {
7466 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
7467 if (rc) {
7468 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
23e12c89 7469 i, rc);
c0c050c5
MC
7470 return rc;
7471 }
7472 }
7473 return 0;
7474}
7475
7476static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7477{
7478 int i;
7479
7480 for (i = 0; i < bp->nr_vnics; i++)
7481 bnxt_hwrm_vnic_set_rss(bp, i, false);
7482}
7483
a46ecb11 7484static void bnxt_clear_vnic(struct bnxt *bp)
c0c050c5 7485{
a46ecb11
MC
7486 if (!bp->vnic_info)
7487 return;
7488
7489 bnxt_hwrm_clear_vnic_filter(bp);
7490 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
c0c050c5
MC
7491 /* clear all RSS setting before free vnic ctx */
7492 bnxt_hwrm_clear_vnic_rss(bp);
7493 bnxt_hwrm_vnic_ctx_free(bp);
c0c050c5 7494 }
a46ecb11
MC
7495 /* before free the vnic, undo the vnic tpa settings */
7496 if (bp->flags & BNXT_FLAG_TPA)
7497 bnxt_set_tpa(bp, false);
7498 bnxt_hwrm_vnic_free(bp);
7499 if (bp->flags & BNXT_FLAG_CHIP_P5)
7500 bnxt_hwrm_vnic_ctx_free(bp);
7501}
7502
7503static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7504 bool irq_re_init)
7505{
7506 bnxt_clear_vnic(bp);
c0c050c5
MC
7507 bnxt_hwrm_ring_free(bp, close_path);
7508 bnxt_hwrm_ring_grp_free(bp);
7509 if (irq_re_init) {
7510 bnxt_hwrm_stat_ctx_free(bp);
7511 bnxt_hwrm_free_tunnel_ports(bp);
7512 }
7513}
7514
39d8ba2e
MC
7515static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
7516{
7517 struct hwrm_func_cfg_input req = {0};
7518 int rc;
7519
7520 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7521 req.fid = cpu_to_le16(0xffff);
7522 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
7523 if (br_mode == BRIDGE_MODE_VEB)
7524 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
7525 else if (br_mode == BRIDGE_MODE_VEPA)
7526 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
7527 else
7528 return -EINVAL;
7529 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
39d8ba2e
MC
7530 return rc;
7531}
7532
c3480a60
MC
7533static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
7534{
7535 struct hwrm_func_cfg_input req = {0};
7536 int rc;
7537
7538 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
7539 return 0;
7540
7541 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7542 req.fid = cpu_to_le16(0xffff);
7543 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
d4f52de0 7544 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
c3480a60 7545 if (size == 128)
d4f52de0 7546 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
c3480a60
MC
7547
7548 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
c3480a60
MC
7549 return rc;
7550}
7551
7b3af4f7 7552static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
c0c050c5 7553{
ae10ae74 7554 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
c0c050c5
MC
7555 int rc;
7556
ae10ae74
MC
7557 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
7558 goto skip_rss_ctx;
7559
c0c050c5 7560 /* allocate context for vnic */
94ce9caa 7561 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
c0c050c5
MC
7562 if (rc) {
7563 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7564 vnic_id, rc);
7565 goto vnic_setup_err;
7566 }
7567 bp->rsscos_nr_ctxs++;
7568
94ce9caa
PS
7569 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7570 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
7571 if (rc) {
7572 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
7573 vnic_id, rc);
7574 goto vnic_setup_err;
7575 }
7576 bp->rsscos_nr_ctxs++;
7577 }
7578
ae10ae74 7579skip_rss_ctx:
c0c050c5
MC
7580 /* configure default vnic, ring grp */
7581 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7582 if (rc) {
7583 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7584 vnic_id, rc);
7585 goto vnic_setup_err;
7586 }
7587
7588 /* Enable RSS hashing on vnic */
7589 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
7590 if (rc) {
7591 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
7592 vnic_id, rc);
7593 goto vnic_setup_err;
7594 }
7595
7596 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7597 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7598 if (rc) {
7599 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7600 vnic_id, rc);
7601 }
7602 }
7603
7604vnic_setup_err:
7605 return rc;
7606}
7607
7b3af4f7
MC
7608static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
7609{
7610 int rc, i, nr_ctxs;
7611
7612 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
7613 for (i = 0; i < nr_ctxs; i++) {
7614 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
7615 if (rc) {
7616 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
7617 vnic_id, i, rc);
7618 break;
7619 }
7620 bp->rsscos_nr_ctxs++;
7621 }
7622 if (i < nr_ctxs)
7623 return -ENOMEM;
7624
7625 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
7626 if (rc) {
7627 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
7628 vnic_id, rc);
7629 return rc;
7630 }
7631 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7632 if (rc) {
7633 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7634 vnic_id, rc);
7635 return rc;
7636 }
7637 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7638 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7639 if (rc) {
7640 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7641 vnic_id, rc);
7642 }
7643 }
7644 return rc;
7645}
7646
7647static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7648{
7649 if (bp->flags & BNXT_FLAG_CHIP_P5)
7650 return __bnxt_setup_vnic_p5(bp, vnic_id);
7651 else
7652 return __bnxt_setup_vnic(bp, vnic_id);
7653}
7654
c0c050c5
MC
7655static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
7656{
7657#ifdef CONFIG_RFS_ACCEL
7658 int i, rc = 0;
7659
9b3d15e6
MC
7660 if (bp->flags & BNXT_FLAG_CHIP_P5)
7661 return 0;
7662
c0c050c5 7663 for (i = 0; i < bp->rx_nr_rings; i++) {
ae10ae74 7664 struct bnxt_vnic_info *vnic;
c0c050c5
MC
7665 u16 vnic_id = i + 1;
7666 u16 ring_id = i;
7667
7668 if (vnic_id >= bp->nr_vnics)
7669 break;
7670
ae10ae74
MC
7671 vnic = &bp->vnic_info[vnic_id];
7672 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7673 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7674 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
b81a90d3 7675 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
c0c050c5
MC
7676 if (rc) {
7677 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7678 vnic_id, rc);
7679 break;
7680 }
7681 rc = bnxt_setup_vnic(bp, vnic_id);
7682 if (rc)
7683 break;
7684 }
7685 return rc;
7686#else
7687 return 0;
7688#endif
7689}
7690
17c71ac3
MC
7691/* Allow PF and VF with default VLAN to be in promiscuous mode */
7692static bool bnxt_promisc_ok(struct bnxt *bp)
7693{
7694#ifdef CONFIG_BNXT_SRIOV
7695 if (BNXT_VF(bp) && !bp->vf.vlan)
7696 return false;
7697#endif
7698 return true;
7699}
7700
dc52c6c7
PS
7701static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7702{
7703 unsigned int rc = 0;
7704
7705 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7706 if (rc) {
7707 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7708 rc);
7709 return rc;
7710 }
7711
7712 rc = bnxt_hwrm_vnic_cfg(bp, 1);
7713 if (rc) {
7714 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7715 rc);
7716 return rc;
7717 }
7718 return rc;
7719}
7720
b664f008 7721static int bnxt_cfg_rx_mode(struct bnxt *);
7d2837dd 7722static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
b664f008 7723
c0c050c5
MC
7724static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7725{
7d2837dd 7726 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
c0c050c5 7727 int rc = 0;
76595193 7728 unsigned int rx_nr_rings = bp->rx_nr_rings;
c0c050c5
MC
7729
7730 if (irq_re_init) {
7731 rc = bnxt_hwrm_stat_ctx_alloc(bp);
7732 if (rc) {
7733 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7734 rc);
7735 goto err_out;
7736 }
7737 }
7738
7739 rc = bnxt_hwrm_ring_alloc(bp);
7740 if (rc) {
7741 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7742 goto err_out;
7743 }
7744
7745 rc = bnxt_hwrm_ring_grp_alloc(bp);
7746 if (rc) {
7747 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7748 goto err_out;
7749 }
7750
76595193
PS
7751 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7752 rx_nr_rings--;
7753
c0c050c5 7754 /* default vnic 0 */
76595193 7755 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
c0c050c5
MC
7756 if (rc) {
7757 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7758 goto err_out;
7759 }
7760
7761 rc = bnxt_setup_vnic(bp, 0);
7762 if (rc)
7763 goto err_out;
7764
7765 if (bp->flags & BNXT_FLAG_RFS) {
7766 rc = bnxt_alloc_rfs_vnics(bp);
7767 if (rc)
7768 goto err_out;
7769 }
7770
7771 if (bp->flags & BNXT_FLAG_TPA) {
7772 rc = bnxt_set_tpa(bp, true);
7773 if (rc)
7774 goto err_out;
7775 }
7776
7777 if (BNXT_VF(bp))
7778 bnxt_update_vf_mac(bp);
7779
7780 /* Filter for default vnic 0 */
7781 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7782 if (rc) {
7783 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7784 goto err_out;
7785 }
7d2837dd 7786 vnic->uc_filter_count = 1;
c0c050c5 7787
30e33848
MC
7788 vnic->rx_mask = 0;
7789 if (bp->dev->flags & IFF_BROADCAST)
7790 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5 7791
17c71ac3 7792 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7d2837dd
MC
7793 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7794
7795 if (bp->dev->flags & IFF_ALLMULTI) {
7796 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7797 vnic->mc_list_count = 0;
7798 } else {
7799 u32 mask = 0;
7800
7801 bnxt_mc_list_updated(bp, &mask);
7802 vnic->rx_mask |= mask;
7803 }
c0c050c5 7804
b664f008
MC
7805 rc = bnxt_cfg_rx_mode(bp);
7806 if (rc)
c0c050c5 7807 goto err_out;
c0c050c5
MC
7808
7809 rc = bnxt_hwrm_set_coal(bp);
7810 if (rc)
7811 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
dc52c6c7
PS
7812 rc);
7813
7814 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7815 rc = bnxt_setup_nitroa0_vnic(bp);
7816 if (rc)
7817 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7818 rc);
7819 }
c0c050c5 7820
cf6645f8
MC
7821 if (BNXT_VF(bp)) {
7822 bnxt_hwrm_func_qcfg(bp);
7823 netdev_update_features(bp->dev);
7824 }
7825
c0c050c5
MC
7826 return 0;
7827
7828err_out:
7829 bnxt_hwrm_resource_free(bp, 0, true);
7830
7831 return rc;
7832}
7833
7834static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7835{
7836 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7837 return 0;
7838}
7839
7840static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7841{
2247925f 7842 bnxt_init_cp_rings(bp);
c0c050c5
MC
7843 bnxt_init_rx_rings(bp);
7844 bnxt_init_tx_rings(bp);
7845 bnxt_init_ring_grps(bp, irq_re_init);
7846 bnxt_init_vnics(bp);
7847
7848 return bnxt_init_chip(bp, irq_re_init);
7849}
7850
c0c050c5
MC
7851static int bnxt_set_real_num_queues(struct bnxt *bp)
7852{
7853 int rc;
7854 struct net_device *dev = bp->dev;
7855
5f449249
MC
7856 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7857 bp->tx_nr_rings_xdp);
c0c050c5
MC
7858 if (rc)
7859 return rc;
7860
7861 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7862 if (rc)
7863 return rc;
7864
7865#ifdef CONFIG_RFS_ACCEL
45019a18 7866 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 7867 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
7868#endif
7869
7870 return rc;
7871}
7872
6e6c5a57
MC
7873static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7874 bool shared)
7875{
7876 int _rx = *rx, _tx = *tx;
7877
7878 if (shared) {
7879 *rx = min_t(int, _rx, max);
7880 *tx = min_t(int, _tx, max);
7881 } else {
7882 if (max < 2)
7883 return -ENOMEM;
7884
7885 while (_rx + _tx > max) {
7886 if (_rx > _tx && _rx > 1)
7887 _rx--;
7888 else if (_tx > 1)
7889 _tx--;
7890 }
7891 *rx = _rx;
7892 *tx = _tx;
7893 }
7894 return 0;
7895}
7896
7809592d
MC
7897static void bnxt_setup_msix(struct bnxt *bp)
7898{
7899 const int len = sizeof(bp->irq_tbl[0].name);
7900 struct net_device *dev = bp->dev;
7901 int tcs, i;
7902
7903 tcs = netdev_get_num_tc(dev);
18e4960c 7904 if (tcs) {
d1e7925e 7905 int i, off, count;
7809592d 7906
d1e7925e
MC
7907 for (i = 0; i < tcs; i++) {
7908 count = bp->tx_nr_rings_per_tc;
7909 off = i * count;
7910 netdev_set_tc_queue(dev, i, count, off);
7809592d
MC
7911 }
7912 }
7913
7914 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c 7915 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7809592d
MC
7916 char *attr;
7917
7918 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7919 attr = "TxRx";
7920 else if (i < bp->rx_nr_rings)
7921 attr = "rx";
7922 else
7923 attr = "tx";
7924
e5811b8c
MC
7925 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7926 attr, i);
7927 bp->irq_tbl[map_idx].handler = bnxt_msix;
7809592d
MC
7928 }
7929}
7930
7931static void bnxt_setup_inta(struct bnxt *bp)
7932{
7933 const int len = sizeof(bp->irq_tbl[0].name);
7934
7935 if (netdev_get_num_tc(bp->dev))
7936 netdev_reset_tc(bp->dev);
7937
7938 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7939 0);
7940 bp->irq_tbl[0].handler = bnxt_inta;
7941}
7942
7943static int bnxt_setup_int_mode(struct bnxt *bp)
7944{
7945 int rc;
7946
7947 if (bp->flags & BNXT_FLAG_USING_MSIX)
7948 bnxt_setup_msix(bp);
7949 else
7950 bnxt_setup_inta(bp);
7951
7952 rc = bnxt_set_real_num_queues(bp);
7953 return rc;
7954}
7955
b7429954 7956#ifdef CONFIG_RFS_ACCEL
8079e8f1
MC
7957static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7958{
6a4f2947 7959 return bp->hw_resc.max_rsscos_ctxs;
8079e8f1
MC
7960}
7961
7962static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7963{
6a4f2947 7964 return bp->hw_resc.max_vnics;
8079e8f1 7965}
b7429954 7966#endif
8079e8f1 7967
e4060d30
MC
7968unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7969{
6a4f2947 7970 return bp->hw_resc.max_stat_ctxs;
e4060d30
MC
7971}
7972
7973unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7974{
6a4f2947 7975 return bp->hw_resc.max_cp_rings;
e4060d30
MC
7976}
7977
e916b081 7978static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
a588e458 7979{
c0b8cda0
MC
7980 unsigned int cp = bp->hw_resc.max_cp_rings;
7981
7982 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7983 cp -= bnxt_get_ulp_msix_num(bp);
7984
7985 return cp;
a588e458
MC
7986}
7987
ad95c27b 7988static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7809592d 7989{
6a4f2947
MC
7990 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7991
f7588cd8
MC
7992 if (bp->flags & BNXT_FLAG_CHIP_P5)
7993 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7994
6a4f2947 7995 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7809592d
MC
7996}
7997
30f52947 7998static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
33c2657e 7999{
6a4f2947 8000 bp->hw_resc.max_irqs = max_irqs;
33c2657e
MC
8001}
8002
e916b081
MC
8003unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8004{
8005 unsigned int cp;
8006
8007 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8008 if (bp->flags & BNXT_FLAG_CHIP_P5)
8009 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8010 else
8011 return cp - bp->cp_nr_rings;
8012}
8013
c027c6b4
VV
8014unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8015{
d77b1ad8 8016 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
c027c6b4
VV
8017}
8018
fbcfc8e4
MC
8019int bnxt_get_avail_msix(struct bnxt *bp, int num)
8020{
8021 int max_cp = bnxt_get_max_func_cp_rings(bp);
8022 int max_irq = bnxt_get_max_func_irqs(bp);
8023 int total_req = bp->cp_nr_rings + num;
8024 int max_idx, avail_msix;
8025
75720e63
MC
8026 max_idx = bp->total_irqs;
8027 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8028 max_idx = min_t(int, bp->total_irqs, max_cp);
fbcfc8e4 8029 avail_msix = max_idx - bp->cp_nr_rings;
f1ca94de 8030 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
fbcfc8e4
MC
8031 return avail_msix;
8032
8033 if (max_irq < total_req) {
8034 num = max_irq - bp->cp_nr_rings;
8035 if (num <= 0)
8036 return 0;
8037 }
8038 return num;
8039}
8040
08654eb2
MC
8041static int bnxt_get_num_msix(struct bnxt *bp)
8042{
f1ca94de 8043 if (!BNXT_NEW_RM(bp))
08654eb2
MC
8044 return bnxt_get_max_func_irqs(bp);
8045
c0b8cda0 8046 return bnxt_nq_rings_in_use(bp);
08654eb2
MC
8047}
8048
7809592d 8049static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 8050{
fbcfc8e4 8051 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7809592d 8052 struct msix_entry *msix_ent;
c0c050c5 8053
08654eb2
MC
8054 total_vecs = bnxt_get_num_msix(bp);
8055 max = bnxt_get_max_func_irqs(bp);
8056 if (total_vecs > max)
8057 total_vecs = max;
8058
2773dfb2
MC
8059 if (!total_vecs)
8060 return 0;
8061
c0c050c5
MC
8062 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8063 if (!msix_ent)
8064 return -ENOMEM;
8065
8066 for (i = 0; i < total_vecs; i++) {
8067 msix_ent[i].entry = i;
8068 msix_ent[i].vector = 0;
8069 }
8070
01657bcd
MC
8071 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8072 min = 2;
8073
8074 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
fbcfc8e4
MC
8075 ulp_msix = bnxt_get_ulp_msix_num(bp);
8076 if (total_vecs < 0 || total_vecs < ulp_msix) {
c0c050c5
MC
8077 rc = -ENODEV;
8078 goto msix_setup_exit;
8079 }
8080
8081 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8082 if (bp->irq_tbl) {
7809592d
MC
8083 for (i = 0; i < total_vecs; i++)
8084 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 8085
7809592d 8086 bp->total_irqs = total_vecs;
c0c050c5 8087 /* Trim rings based upon num of vectors allocated */
6e6c5a57 8088 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
fbcfc8e4 8089 total_vecs - ulp_msix, min == 1);
6e6c5a57
MC
8090 if (rc)
8091 goto msix_setup_exit;
8092
7809592d
MC
8093 bp->cp_nr_rings = (min == 1) ?
8094 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8095 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 8096
c0c050c5
MC
8097 } else {
8098 rc = -ENOMEM;
8099 goto msix_setup_exit;
8100 }
8101 bp->flags |= BNXT_FLAG_USING_MSIX;
8102 kfree(msix_ent);
8103 return 0;
8104
8105msix_setup_exit:
7809592d
MC
8106 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8107 kfree(bp->irq_tbl);
8108 bp->irq_tbl = NULL;
c0c050c5
MC
8109 pci_disable_msix(bp->pdev);
8110 kfree(msix_ent);
8111 return rc;
8112}
8113
7809592d 8114static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 8115{
c0c050c5 8116 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
8117 if (!bp->irq_tbl)
8118 return -ENOMEM;
8119
8120 bp->total_irqs = 1;
c0c050c5
MC
8121 bp->rx_nr_rings = 1;
8122 bp->tx_nr_rings = 1;
8123 bp->cp_nr_rings = 1;
01657bcd 8124 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 8125 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 8126 return 0;
c0c050c5
MC
8127}
8128
7809592d 8129static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5
MC
8130{
8131 int rc = 0;
8132
8133 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 8134 rc = bnxt_init_msix(bp);
c0c050c5 8135
1fa72e29 8136 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 8137 /* fallback to INTA */
7809592d 8138 rc = bnxt_init_inta(bp);
c0c050c5
MC
8139 }
8140 return rc;
8141}
8142
7809592d
MC
8143static void bnxt_clear_int_mode(struct bnxt *bp)
8144{
8145 if (bp->flags & BNXT_FLAG_USING_MSIX)
8146 pci_disable_msix(bp->pdev);
8147
8148 kfree(bp->irq_tbl);
8149 bp->irq_tbl = NULL;
8150 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8151}
8152
1b3f0b75 8153int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
674f50a5 8154{
674f50a5 8155 int tcs = netdev_get_num_tc(bp->dev);
1b3f0b75 8156 bool irq_cleared = false;
674f50a5
MC
8157 int rc;
8158
8159 if (!bnxt_need_reserve_rings(bp))
8160 return 0;
8161
1b3f0b75
MC
8162 if (irq_re_init && BNXT_NEW_RM(bp) &&
8163 bnxt_get_num_msix(bp) != bp->total_irqs) {
ec86f14e 8164 bnxt_ulp_irq_stop(bp);
674f50a5 8165 bnxt_clear_int_mode(bp);
1b3f0b75 8166 irq_cleared = true;
36d65be9
MC
8167 }
8168 rc = __bnxt_reserve_rings(bp);
1b3f0b75 8169 if (irq_cleared) {
36d65be9
MC
8170 if (!rc)
8171 rc = bnxt_init_int_mode(bp);
ec86f14e 8172 bnxt_ulp_irq_restart(bp, rc);
36d65be9
MC
8173 }
8174 if (rc) {
8175 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8176 return rc;
674f50a5
MC
8177 }
8178 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8179 netdev_err(bp->dev, "tx ring reservation failure\n");
8180 netdev_reset_tc(bp->dev);
8181 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8182 return -ENOMEM;
8183 }
674f50a5
MC
8184 return 0;
8185}
8186
c0c050c5
MC
8187static void bnxt_free_irq(struct bnxt *bp)
8188{
8189 struct bnxt_irq *irq;
8190 int i;
8191
8192#ifdef CONFIG_RFS_ACCEL
8193 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8194 bp->dev->rx_cpu_rmap = NULL;
8195#endif
cb98526b 8196 if (!bp->irq_tbl || !bp->bnapi)
c0c050c5
MC
8197 return;
8198
8199 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
8200 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8201
8202 irq = &bp->irq_tbl[map_idx];
56f0fd80
VV
8203 if (irq->requested) {
8204 if (irq->have_cpumask) {
8205 irq_set_affinity_hint(irq->vector, NULL);
8206 free_cpumask_var(irq->cpu_mask);
8207 irq->have_cpumask = 0;
8208 }
c0c050c5 8209 free_irq(irq->vector, bp->bnapi[i]);
56f0fd80
VV
8210 }
8211
c0c050c5
MC
8212 irq->requested = 0;
8213 }
c0c050c5
MC
8214}
8215
8216static int bnxt_request_irq(struct bnxt *bp)
8217{
b81a90d3 8218 int i, j, rc = 0;
c0c050c5
MC
8219 unsigned long flags = 0;
8220#ifdef CONFIG_RFS_ACCEL
e5811b8c 8221 struct cpu_rmap *rmap;
c0c050c5
MC
8222#endif
8223
e5811b8c
MC
8224 rc = bnxt_setup_int_mode(bp);
8225 if (rc) {
8226 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8227 rc);
8228 return rc;
8229 }
8230#ifdef CONFIG_RFS_ACCEL
8231 rmap = bp->dev->rx_cpu_rmap;
8232#endif
c0c050c5
MC
8233 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8234 flags = IRQF_SHARED;
8235
b81a90d3 8236 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
8237 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8238 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8239
c0c050c5 8240#ifdef CONFIG_RFS_ACCEL
b81a90d3 8241 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
8242 rc = irq_cpu_rmap_add(rmap, irq->vector);
8243 if (rc)
8244 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
b81a90d3
MC
8245 j);
8246 j++;
c0c050c5
MC
8247 }
8248#endif
8249 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8250 bp->bnapi[i]);
8251 if (rc)
8252 break;
8253
8254 irq->requested = 1;
56f0fd80
VV
8255
8256 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8257 int numa_node = dev_to_node(&bp->pdev->dev);
8258
8259 irq->have_cpumask = 1;
8260 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8261 irq->cpu_mask);
8262 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8263 if (rc) {
8264 netdev_warn(bp->dev,
8265 "Set affinity failed, IRQ = %d\n",
8266 irq->vector);
8267 break;
8268 }
8269 }
c0c050c5
MC
8270 }
8271 return rc;
8272}
8273
8274static void bnxt_del_napi(struct bnxt *bp)
8275{
8276 int i;
8277
8278 if (!bp->bnapi)
8279 return;
8280
8281 for (i = 0; i < bp->cp_nr_rings; i++) {
8282 struct bnxt_napi *bnapi = bp->bnapi[i];
8283
8284 napi_hash_del(&bnapi->napi);
8285 netif_napi_del(&bnapi->napi);
8286 }
e5f6f564
ED
8287 /* We called napi_hash_del() before netif_napi_del(), we need
8288 * to respect an RCU grace period before freeing napi structures.
8289 */
8290 synchronize_net();
c0c050c5
MC
8291}
8292
8293static void bnxt_init_napi(struct bnxt *bp)
8294{
8295 int i;
10bbdaf5 8296 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
8297 struct bnxt_napi *bnapi;
8298
8299 if (bp->flags & BNXT_FLAG_USING_MSIX) {
0fcec985
MC
8300 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8301
8302 if (bp->flags & BNXT_FLAG_CHIP_P5)
8303 poll_fn = bnxt_poll_p5;
8304 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10bbdaf5
PS
8305 cp_nr_rings--;
8306 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5 8307 bnapi = bp->bnapi[i];
0fcec985 8308 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
c0c050c5 8309 }
10bbdaf5
PS
8310 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8311 bnapi = bp->bnapi[cp_nr_rings];
8312 netif_napi_add(bp->dev, &bnapi->napi,
8313 bnxt_poll_nitroa0, 64);
10bbdaf5 8314 }
c0c050c5
MC
8315 } else {
8316 bnapi = bp->bnapi[0];
8317 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
c0c050c5
MC
8318 }
8319}
8320
8321static void bnxt_disable_napi(struct bnxt *bp)
8322{
8323 int i;
8324
8325 if (!bp->bnapi)
8326 return;
8327
0bc0b97f
AG
8328 for (i = 0; i < bp->cp_nr_rings; i++) {
8329 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8330
8331 if (bp->bnapi[i]->rx_ring)
8332 cancel_work_sync(&cpr->dim.work);
8333
c0c050c5 8334 napi_disable(&bp->bnapi[i]->napi);
0bc0b97f 8335 }
c0c050c5
MC
8336}
8337
8338static void bnxt_enable_napi(struct bnxt *bp)
8339{
8340 int i;
8341
8342 for (i = 0; i < bp->cp_nr_rings; i++) {
6a8788f2 8343 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
fa7e2812 8344 bp->bnapi[i]->in_reset = false;
6a8788f2
AG
8345
8346 if (bp->bnapi[i]->rx_ring) {
8347 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
c002bd52 8348 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6a8788f2 8349 }
c0c050c5
MC
8350 napi_enable(&bp->bnapi[i]->napi);
8351 }
8352}
8353
7df4ae9f 8354void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
8355{
8356 int i;
c0c050c5 8357 struct bnxt_tx_ring_info *txr;
c0c050c5 8358
b6ab4b01 8359 if (bp->tx_ring) {
c0c050c5 8360 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 8361 txr = &bp->tx_ring[i];
c0c050c5 8362 txr->dev_state = BNXT_DEV_STATE_CLOSING;
c0c050c5
MC
8363 }
8364 }
8365 /* Stop all TX queues */
8366 netif_tx_disable(bp->dev);
8367 netif_carrier_off(bp->dev);
8368}
8369
7df4ae9f 8370void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
8371{
8372 int i;
c0c050c5 8373 struct bnxt_tx_ring_info *txr;
c0c050c5
MC
8374
8375 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 8376 txr = &bp->tx_ring[i];
c0c050c5
MC
8377 txr->dev_state = 0;
8378 }
8379 netif_tx_wake_all_queues(bp->dev);
8380 if (bp->link_info.link_up)
8381 netif_carrier_on(bp->dev);
8382}
8383
8384static void bnxt_report_link(struct bnxt *bp)
8385{
8386 if (bp->link_info.link_up) {
8387 const char *duplex;
8388 const char *flow_ctrl;
38a21b34
DK
8389 u32 speed;
8390 u16 fec;
c0c050c5
MC
8391
8392 netif_carrier_on(bp->dev);
8393 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
8394 duplex = "full";
8395 else
8396 duplex = "half";
8397 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
8398 flow_ctrl = "ON - receive & transmit";
8399 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
8400 flow_ctrl = "ON - transmit";
8401 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
8402 flow_ctrl = "ON - receive";
8403 else
8404 flow_ctrl = "none";
8405 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
38a21b34 8406 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
c0c050c5 8407 speed, duplex, flow_ctrl);
170ce013
MC
8408 if (bp->flags & BNXT_FLAG_EEE_CAP)
8409 netdev_info(bp->dev, "EEE is %s\n",
8410 bp->eee.eee_active ? "active" :
8411 "not active");
e70c752f
MC
8412 fec = bp->link_info.fec_cfg;
8413 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
8414 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
8415 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
8416 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
8417 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
c0c050c5
MC
8418 } else {
8419 netif_carrier_off(bp->dev);
8420 netdev_err(bp->dev, "NIC Link is Down\n");
8421 }
8422}
8423
170ce013
MC
8424static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
8425{
8426 int rc = 0;
8427 struct hwrm_port_phy_qcaps_input req = {0};
8428 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
93ed8117 8429 struct bnxt_link_info *link_info = &bp->link_info;
170ce013 8430
ba642ab7
MC
8431 bp->flags &= ~BNXT_FLAG_EEE_CAP;
8432 if (bp->test_info)
8a60efd1
MC
8433 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
8434 BNXT_TEST_FL_AN_PHY_LPBK);
170ce013
MC
8435 if (bp->hwrm_spec_code < 0x10201)
8436 return 0;
8437
8438 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
8439
8440 mutex_lock(&bp->hwrm_cmd_lock);
8441 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8442 if (rc)
8443 goto hwrm_phy_qcaps_exit;
8444
acb20054 8445 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
170ce013
MC
8446 struct ethtool_eee *eee = &bp->eee;
8447 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
8448
8449 bp->flags |= BNXT_FLAG_EEE_CAP;
8450 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8451 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
8452 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
8453 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
8454 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
8455 }
55fd0cf3
MC
8456 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
8457 if (bp->test_info)
8458 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
8459 }
8a60efd1
MC
8460 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
8461 if (bp->test_info)
8462 bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
8463 }
c7e457f4
MC
8464 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
8465 if (BNXT_PF(bp))
8466 bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
8467 }
520ad89a
MC
8468 if (resp->supported_speeds_auto_mode)
8469 link_info->support_auto_speeds =
8470 le16_to_cpu(resp->supported_speeds_auto_mode);
170ce013 8471
d5430d31
MC
8472 bp->port_count = resp->port_cnt;
8473
170ce013
MC
8474hwrm_phy_qcaps_exit:
8475 mutex_unlock(&bp->hwrm_cmd_lock);
8476 return rc;
8477}
8478
c0c050c5
MC
8479static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
8480{
8481 int rc = 0;
8482 struct bnxt_link_info *link_info = &bp->link_info;
8483 struct hwrm_port_phy_qcfg_input req = {0};
8484 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8485 u8 link_up = link_info->link_up;
286ef9d6 8486 u16 diff;
c0c050c5
MC
8487
8488 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
8489
8490 mutex_lock(&bp->hwrm_cmd_lock);
8491 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8492 if (rc) {
8493 mutex_unlock(&bp->hwrm_cmd_lock);
8494 return rc;
8495 }
8496
8497 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
8498 link_info->phy_link_status = resp->link;
acb20054
MC
8499 link_info->duplex = resp->duplex_cfg;
8500 if (bp->hwrm_spec_code >= 0x10800)
8501 link_info->duplex = resp->duplex_state;
c0c050c5
MC
8502 link_info->pause = resp->pause;
8503 link_info->auto_mode = resp->auto_mode;
8504 link_info->auto_pause_setting = resp->auto_pause;
3277360e 8505 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 8506 link_info->force_pause_setting = resp->force_pause;
acb20054 8507 link_info->duplex_setting = resp->duplex_cfg;
c0c050c5
MC
8508 if (link_info->phy_link_status == BNXT_LINK_LINK)
8509 link_info->link_speed = le16_to_cpu(resp->link_speed);
8510 else
8511 link_info->link_speed = 0;
8512 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
c0c050c5
MC
8513 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
8514 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
3277360e
MC
8515 link_info->lp_auto_link_speeds =
8516 le16_to_cpu(resp->link_partner_adv_speeds);
c0c050c5
MC
8517 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
8518 link_info->phy_ver[0] = resp->phy_maj;
8519 link_info->phy_ver[1] = resp->phy_min;
8520 link_info->phy_ver[2] = resp->phy_bld;
8521 link_info->media_type = resp->media_type;
03efbec0 8522 link_info->phy_type = resp->phy_type;
11f15ed3 8523 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
8524 link_info->phy_addr = resp->eee_config_phy_addr &
8525 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 8526 link_info->module_status = resp->module_status;
170ce013
MC
8527
8528 if (bp->flags & BNXT_FLAG_EEE_CAP) {
8529 struct ethtool_eee *eee = &bp->eee;
8530 u16 fw_speeds;
8531
8532 eee->eee_active = 0;
8533 if (resp->eee_config_phy_addr &
8534 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
8535 eee->eee_active = 1;
8536 fw_speeds = le16_to_cpu(
8537 resp->link_partner_adv_eee_link_speed_mask);
8538 eee->lp_advertised =
8539 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8540 }
8541
8542 /* Pull initial EEE config */
8543 if (!chng_link_state) {
8544 if (resp->eee_config_phy_addr &
8545 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
8546 eee->eee_enabled = 1;
c0c050c5 8547
170ce013
MC
8548 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
8549 eee->advertised =
8550 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8551
8552 if (resp->eee_config_phy_addr &
8553 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
8554 __le32 tmr;
8555
8556 eee->tx_lpi_enabled = 1;
8557 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
8558 eee->tx_lpi_timer = le32_to_cpu(tmr) &
8559 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
8560 }
8561 }
8562 }
e70c752f
MC
8563
8564 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8565 if (bp->hwrm_spec_code >= 0x10504)
8566 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8567
c0c050c5
MC
8568 /* TODO: need to add more logic to report VF link */
8569 if (chng_link_state) {
8570 if (link_info->phy_link_status == BNXT_LINK_LINK)
8571 link_info->link_up = 1;
8572 else
8573 link_info->link_up = 0;
8574 if (link_up != link_info->link_up)
8575 bnxt_report_link(bp);
8576 } else {
8577 /* alwasy link down if not require to update link state */
8578 link_info->link_up = 0;
8579 }
8580 mutex_unlock(&bp->hwrm_cmd_lock);
286ef9d6 8581
c7e457f4 8582 if (!BNXT_PHY_CFG_ABLE(bp))
dac04907
MC
8583 return 0;
8584
286ef9d6
MC
8585 diff = link_info->support_auto_speeds ^ link_info->advertising;
8586 if ((link_info->support_auto_speeds | diff) !=
8587 link_info->support_auto_speeds) {
8588 /* An advertised speed is no longer supported, so we need to
0eaa24b9
MC
8589 * update the advertisement settings. Caller holds RTNL
8590 * so we can modify link settings.
286ef9d6 8591 */
286ef9d6 8592 link_info->advertising = link_info->support_auto_speeds;
0eaa24b9 8593 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
286ef9d6 8594 bnxt_hwrm_set_link_setting(bp, true, false);
286ef9d6 8595 }
c0c050c5
MC
8596 return 0;
8597}
8598
10289bec
MC
8599static void bnxt_get_port_module_status(struct bnxt *bp)
8600{
8601 struct bnxt_link_info *link_info = &bp->link_info;
8602 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
8603 u8 module_status;
8604
8605 if (bnxt_update_link(bp, true))
8606 return;
8607
8608 module_status = link_info->module_status;
8609 switch (module_status) {
8610 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
8611 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
8612 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
8613 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
8614 bp->pf.port_id);
8615 if (bp->hwrm_spec_code >= 0x10201) {
8616 netdev_warn(bp->dev, "Module part number %s\n",
8617 resp->phy_vendor_partnumber);
8618 }
8619 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
8620 netdev_warn(bp->dev, "TX is disabled\n");
8621 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
8622 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
8623 }
8624}
8625
c0c050c5
MC
8626static void
8627bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
8628{
8629 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
8630 if (bp->hwrm_spec_code >= 0x10201)
8631 req->auto_pause =
8632 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
8633 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8634 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
8635 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 8636 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
8637 req->enables |=
8638 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8639 } else {
8640 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8641 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
8642 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8643 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
8644 req->enables |=
8645 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
8646 if (bp->hwrm_spec_code >= 0x10201) {
8647 req->auto_pause = req->force_pause;
8648 req->enables |= cpu_to_le32(
8649 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8650 }
c0c050c5
MC
8651 }
8652}
8653
8654static void bnxt_hwrm_set_link_common(struct bnxt *bp,
8655 struct hwrm_port_phy_cfg_input *req)
8656{
8657 u8 autoneg = bp->link_info.autoneg;
8658 u16 fw_link_speed = bp->link_info.req_link_speed;
68515a18 8659 u16 advertising = bp->link_info.advertising;
c0c050c5
MC
8660
8661 if (autoneg & BNXT_AUTONEG_SPEED) {
8662 req->auto_mode |=
11f15ed3 8663 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
c0c050c5
MC
8664
8665 req->enables |= cpu_to_le32(
8666 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
8667 req->auto_link_speed_mask = cpu_to_le16(advertising);
8668
8669 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
8670 req->flags |=
8671 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
8672 } else {
8673 req->force_link_speed = cpu_to_le16(fw_link_speed);
8674 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
8675 }
8676
c0c050c5
MC
8677 /* tell chimp that the setting takes effect immediately */
8678 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
8679}
8680
8681int bnxt_hwrm_set_pause(struct bnxt *bp)
8682{
8683 struct hwrm_port_phy_cfg_input req = {0};
8684 int rc;
8685
8686 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8687 bnxt_hwrm_set_pause_common(bp, &req);
8688
8689 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8690 bp->link_info.force_link_chng)
8691 bnxt_hwrm_set_link_common(bp, &req);
8692
8693 mutex_lock(&bp->hwrm_cmd_lock);
8694 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8695 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
8696 /* since changing of pause setting doesn't trigger any link
8697 * change event, the driver needs to update the current pause
8698 * result upon successfully return of the phy_cfg command
8699 */
8700 bp->link_info.pause =
8701 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8702 bp->link_info.auto_pause_setting = 0;
8703 if (!bp->link_info.force_link_chng)
8704 bnxt_report_link(bp);
8705 }
8706 bp->link_info.force_link_chng = false;
8707 mutex_unlock(&bp->hwrm_cmd_lock);
8708 return rc;
8709}
8710
939f7f0c
MC
8711static void bnxt_hwrm_set_eee(struct bnxt *bp,
8712 struct hwrm_port_phy_cfg_input *req)
8713{
8714 struct ethtool_eee *eee = &bp->eee;
8715
8716 if (eee->eee_enabled) {
8717 u16 eee_speeds;
8718 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8719
8720 if (eee->tx_lpi_enabled)
8721 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8722 else
8723 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8724
8725 req->flags |= cpu_to_le32(flags);
8726 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8727 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8728 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8729 } else {
8730 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8731 }
8732}
8733
8734int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5
MC
8735{
8736 struct hwrm_port_phy_cfg_input req = {0};
8737
8738 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8739 if (set_pause)
8740 bnxt_hwrm_set_pause_common(bp, &req);
8741
8742 bnxt_hwrm_set_link_common(bp, &req);
939f7f0c
MC
8743
8744 if (set_eee)
8745 bnxt_hwrm_set_eee(bp, &req);
c0c050c5
MC
8746 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8747}
8748
33f7d55f
MC
8749static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8750{
8751 struct hwrm_port_phy_cfg_input req = {0};
8752
567b2abe 8753 if (!BNXT_SINGLE_PF(bp))
33f7d55f
MC
8754 return 0;
8755
8756 if (pci_num_vf(bp->pdev))
8757 return 0;
8758
8759 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
16d663a6 8760 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
33f7d55f
MC
8761 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8762}
8763
ec5d31e3
MC
8764static int bnxt_fw_init_one(struct bnxt *bp);
8765
25e1acd6
MC
8766static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8767{
8768 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8769 struct hwrm_func_drv_if_change_input req = {0};
ec5d31e3
MC
8770 bool resc_reinit = false, fw_reset = false;
8771 u32 flags = 0;
25e1acd6
MC
8772 int rc;
8773
8774 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8775 return 0;
8776
8777 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8778 if (up)
8779 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8780 mutex_lock(&bp->hwrm_cmd_lock);
8781 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
ec5d31e3
MC
8782 if (!rc)
8783 flags = le32_to_cpu(resp->flags);
25e1acd6 8784 mutex_unlock(&bp->hwrm_cmd_lock);
ec5d31e3
MC
8785 if (rc)
8786 return rc;
25e1acd6 8787
ec5d31e3
MC
8788 if (!up)
8789 return 0;
25e1acd6 8790
ec5d31e3
MC
8791 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
8792 resc_reinit = true;
8793 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
8794 fw_reset = true;
8795
3bc7d4a3
MC
8796 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
8797 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
8798 return -ENODEV;
8799 }
ec5d31e3
MC
8800 if (resc_reinit || fw_reset) {
8801 if (fw_reset) {
f3a6d206
VV
8802 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
8803 bnxt_ulp_stop(bp);
325f85f3
MC
8804 bnxt_free_ctx_mem(bp);
8805 kfree(bp->ctx);
8806 bp->ctx = NULL;
ec5d31e3
MC
8807 rc = bnxt_fw_init_one(bp);
8808 if (rc) {
8809 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
8810 return rc;
8811 }
8812 bnxt_clear_int_mode(bp);
8813 rc = bnxt_init_int_mode(bp);
8814 if (rc) {
8815 netdev_err(bp->dev, "init int mode failed\n");
8816 return rc;
8817 }
8818 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
8819 }
8820 if (BNXT_NEW_RM(bp)) {
8821 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8822
8823 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8824 hw_resc->resv_cp_rings = 0;
8825 hw_resc->resv_stat_ctxs = 0;
8826 hw_resc->resv_irqs = 0;
8827 hw_resc->resv_tx_rings = 0;
8828 hw_resc->resv_rx_rings = 0;
8829 hw_resc->resv_hw_ring_grps = 0;
8830 hw_resc->resv_vnics = 0;
8831 if (!fw_reset) {
8832 bp->tx_nr_rings = 0;
8833 bp->rx_nr_rings = 0;
8834 }
8835 }
25e1acd6 8836 }
ec5d31e3 8837 return 0;
25e1acd6
MC
8838}
8839
5ad2cbee
MC
8840static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8841{
8842 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8843 struct hwrm_port_led_qcaps_input req = {0};
8844 struct bnxt_pf_info *pf = &bp->pf;
8845 int rc;
8846
ba642ab7 8847 bp->num_leds = 0;
5ad2cbee
MC
8848 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8849 return 0;
8850
8851 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8852 req.port_id = cpu_to_le16(pf->port_id);
8853 mutex_lock(&bp->hwrm_cmd_lock);
8854 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8855 if (rc) {
8856 mutex_unlock(&bp->hwrm_cmd_lock);
8857 return rc;
8858 }
8859 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8860 int i;
8861
8862 bp->num_leds = resp->num_leds;
8863 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8864 bp->num_leds);
8865 for (i = 0; i < bp->num_leds; i++) {
8866 struct bnxt_led_info *led = &bp->leds[i];
8867 __le16 caps = led->led_state_caps;
8868
8869 if (!led->led_group_id ||
8870 !BNXT_LED_ALT_BLINK_CAP(caps)) {
8871 bp->num_leds = 0;
8872 break;
8873 }
8874 }
8875 }
8876 mutex_unlock(&bp->hwrm_cmd_lock);
8877 return 0;
8878}
8879
5282db6c
MC
8880int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8881{
8882 struct hwrm_wol_filter_alloc_input req = {0};
8883 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8884 int rc;
8885
8886 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8887 req.port_id = cpu_to_le16(bp->pf.port_id);
8888 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8889 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8890 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8891 mutex_lock(&bp->hwrm_cmd_lock);
8892 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8893 if (!rc)
8894 bp->wol_filter_id = resp->wol_filter_id;
8895 mutex_unlock(&bp->hwrm_cmd_lock);
8896 return rc;
8897}
8898
8899int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8900{
8901 struct hwrm_wol_filter_free_input req = {0};
8902 int rc;
8903
8904 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8905 req.port_id = cpu_to_le16(bp->pf.port_id);
8906 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8907 req.wol_filter_id = bp->wol_filter_id;
8908 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8909 return rc;
8910}
8911
c1ef146a
MC
8912static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8913{
8914 struct hwrm_wol_filter_qcfg_input req = {0};
8915 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8916 u16 next_handle = 0;
8917 int rc;
8918
8919 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8920 req.port_id = cpu_to_le16(bp->pf.port_id);
8921 req.handle = cpu_to_le16(handle);
8922 mutex_lock(&bp->hwrm_cmd_lock);
8923 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8924 if (!rc) {
8925 next_handle = le16_to_cpu(resp->next_handle);
8926 if (next_handle != 0) {
8927 if (resp->wol_type ==
8928 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8929 bp->wol = 1;
8930 bp->wol_filter_id = resp->wol_filter_id;
8931 }
8932 }
8933 }
8934 mutex_unlock(&bp->hwrm_cmd_lock);
8935 return next_handle;
8936}
8937
8938static void bnxt_get_wol_settings(struct bnxt *bp)
8939{
8940 u16 handle = 0;
8941
ba642ab7 8942 bp->wol = 0;
c1ef146a
MC
8943 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8944 return;
8945
8946 do {
8947 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8948 } while (handle && handle != 0xffff);
8949}
8950
cde49a42
VV
8951#ifdef CONFIG_BNXT_HWMON
8952static ssize_t bnxt_show_temp(struct device *dev,
8953 struct device_attribute *devattr, char *buf)
8954{
8955 struct hwrm_temp_monitor_query_input req = {0};
8956 struct hwrm_temp_monitor_query_output *resp;
8957 struct bnxt *bp = dev_get_drvdata(dev);
8958 u32 temp = 0;
8959
8960 resp = bp->hwrm_cmd_resp_addr;
8961 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8962 mutex_lock(&bp->hwrm_cmd_lock);
8963 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8964 temp = resp->temp * 1000; /* display millidegree */
8965 mutex_unlock(&bp->hwrm_cmd_lock);
8966
8967 return sprintf(buf, "%u\n", temp);
8968}
8969static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8970
8971static struct attribute *bnxt_attrs[] = {
8972 &sensor_dev_attr_temp1_input.dev_attr.attr,
8973 NULL
8974};
8975ATTRIBUTE_GROUPS(bnxt);
8976
8977static void bnxt_hwmon_close(struct bnxt *bp)
8978{
8979 if (bp->hwmon_dev) {
8980 hwmon_device_unregister(bp->hwmon_dev);
8981 bp->hwmon_dev = NULL;
8982 }
8983}
8984
8985static void bnxt_hwmon_open(struct bnxt *bp)
8986{
8987 struct pci_dev *pdev = bp->pdev;
8988
ba642ab7
MC
8989 if (bp->hwmon_dev)
8990 return;
8991
cde49a42
VV
8992 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8993 DRV_MODULE_NAME, bp,
8994 bnxt_groups);
8995 if (IS_ERR(bp->hwmon_dev)) {
8996 bp->hwmon_dev = NULL;
8997 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8998 }
8999}
9000#else
9001static void bnxt_hwmon_close(struct bnxt *bp)
9002{
9003}
9004
9005static void bnxt_hwmon_open(struct bnxt *bp)
9006{
9007}
9008#endif
9009
939f7f0c
MC
9010static bool bnxt_eee_config_ok(struct bnxt *bp)
9011{
9012 struct ethtool_eee *eee = &bp->eee;
9013 struct bnxt_link_info *link_info = &bp->link_info;
9014
9015 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
9016 return true;
9017
9018 if (eee->eee_enabled) {
9019 u32 advertising =
9020 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9021
9022 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9023 eee->eee_enabled = 0;
9024 return false;
9025 }
9026 if (eee->advertised & ~advertising) {
9027 eee->advertised = advertising & eee->supported;
9028 return false;
9029 }
9030 }
9031 return true;
9032}
9033
c0c050c5
MC
9034static int bnxt_update_phy_setting(struct bnxt *bp)
9035{
9036 int rc;
9037 bool update_link = false;
9038 bool update_pause = false;
939f7f0c 9039 bool update_eee = false;
c0c050c5
MC
9040 struct bnxt_link_info *link_info = &bp->link_info;
9041
9042 rc = bnxt_update_link(bp, true);
9043 if (rc) {
9044 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9045 rc);
9046 return rc;
9047 }
33dac24a
MC
9048 if (!BNXT_SINGLE_PF(bp))
9049 return 0;
9050
c0c050c5 9051 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
c9ee9516
MC
9052 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9053 link_info->req_flow_ctrl)
c0c050c5
MC
9054 update_pause = true;
9055 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9056 link_info->force_pause_setting != link_info->req_flow_ctrl)
9057 update_pause = true;
c0c050c5
MC
9058 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9059 if (BNXT_AUTO_MODE(link_info->auto_mode))
9060 update_link = true;
9061 if (link_info->req_link_speed != link_info->force_link_speed)
9062 update_link = true;
de73018f
MC
9063 if (link_info->req_duplex != link_info->duplex_setting)
9064 update_link = true;
c0c050c5
MC
9065 } else {
9066 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9067 update_link = true;
9068 if (link_info->advertising != link_info->auto_link_speeds)
9069 update_link = true;
c0c050c5
MC
9070 }
9071
16d663a6
MC
9072 /* The last close may have shutdown the link, so need to call
9073 * PHY_CFG to bring it back up.
9074 */
83d8f5e9 9075 if (!bp->link_info.link_up)
16d663a6
MC
9076 update_link = true;
9077
939f7f0c
MC
9078 if (!bnxt_eee_config_ok(bp))
9079 update_eee = true;
9080
c0c050c5 9081 if (update_link)
939f7f0c 9082 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
c0c050c5
MC
9083 else if (update_pause)
9084 rc = bnxt_hwrm_set_pause(bp);
9085 if (rc) {
9086 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9087 rc);
9088 return rc;
9089 }
9090
9091 return rc;
9092}
9093
11809490
JH
9094/* Common routine to pre-map certain register block to different GRC window.
9095 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
9096 * in PF and 3 windows in VF that can be customized to map in different
9097 * register blocks.
9098 */
9099static void bnxt_preset_reg_win(struct bnxt *bp)
9100{
9101 if (BNXT_PF(bp)) {
9102 /* CAG registers map to GRC window #4 */
9103 writel(BNXT_CAG_REG_BASE,
9104 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9105 }
9106}
9107
47558acd
MC
9108static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9109
c0c050c5
MC
9110static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9111{
9112 int rc = 0;
9113
11809490 9114 bnxt_preset_reg_win(bp);
c0c050c5
MC
9115 netif_carrier_off(bp->dev);
9116 if (irq_re_init) {
47558acd
MC
9117 /* Reserve rings now if none were reserved at driver probe. */
9118 rc = bnxt_init_dflt_ring_mode(bp);
9119 if (rc) {
9120 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9121 return rc;
9122 }
c0c050c5 9123 }
1b3f0b75 9124 rc = bnxt_reserve_rings(bp, irq_re_init);
41e8d798
MC
9125 if (rc)
9126 return rc;
c0c050c5
MC
9127 if ((bp->flags & BNXT_FLAG_RFS) &&
9128 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9129 /* disable RFS if falling back to INTA */
9130 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9131 bp->flags &= ~BNXT_FLAG_RFS;
9132 }
9133
9134 rc = bnxt_alloc_mem(bp, irq_re_init);
9135 if (rc) {
9136 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9137 goto open_err_free_mem;
9138 }
9139
9140 if (irq_re_init) {
9141 bnxt_init_napi(bp);
9142 rc = bnxt_request_irq(bp);
9143 if (rc) {
9144 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
c58387ab 9145 goto open_err_irq;
c0c050c5
MC
9146 }
9147 }
9148
9149 bnxt_enable_napi(bp);
cabfb09d 9150 bnxt_debug_dev_init(bp);
c0c050c5
MC
9151
9152 rc = bnxt_init_nic(bp, irq_re_init);
9153 if (rc) {
9154 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9155 goto open_err;
9156 }
9157
9158 if (link_re_init) {
e2dc9b6e 9159 mutex_lock(&bp->link_lock);
c0c050c5 9160 rc = bnxt_update_phy_setting(bp);
e2dc9b6e 9161 mutex_unlock(&bp->link_lock);
a1ef4a79 9162 if (rc) {
ba41d46f 9163 netdev_warn(bp->dev, "failed to update phy settings\n");
a1ef4a79
MC
9164 if (BNXT_SINGLE_PF(bp)) {
9165 bp->link_info.phy_retry = true;
9166 bp->link_info.phy_retry_expires =
9167 jiffies + 5 * HZ;
9168 }
9169 }
c0c050c5
MC
9170 }
9171
7cdd5fc3 9172 if (irq_re_init)
ad51b8e9 9173 udp_tunnel_get_rx_info(bp->dev);
c0c050c5 9174
caefe526 9175 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
9176 bnxt_enable_int(bp);
9177 /* Enable TX queues */
9178 bnxt_tx_enable(bp);
9179 mod_timer(&bp->timer, jiffies + bp->current_interval);
10289bec
MC
9180 /* Poll link status and check for SFP+ module status */
9181 bnxt_get_port_module_status(bp);
c0c050c5 9182
ee5c7fb3
SP
9183 /* VF-reps may need to be re-opened after the PF is re-opened */
9184 if (BNXT_PF(bp))
9185 bnxt_vf_reps_open(bp);
c0c050c5
MC
9186 return 0;
9187
9188open_err:
cabfb09d 9189 bnxt_debug_dev_exit(bp);
c0c050c5 9190 bnxt_disable_napi(bp);
c58387ab
VG
9191
9192open_err_irq:
c0c050c5
MC
9193 bnxt_del_napi(bp);
9194
9195open_err_free_mem:
9196 bnxt_free_skbs(bp);
9197 bnxt_free_irq(bp);
9198 bnxt_free_mem(bp, true);
9199 return rc;
9200}
9201
9202/* rtnl_lock held */
9203int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9204{
9205 int rc = 0;
9206
9207 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9208 if (rc) {
9209 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9210 dev_close(bp->dev);
9211 }
9212 return rc;
9213}
9214
f7dc1ea6
MC
9215/* rtnl_lock held, open the NIC half way by allocating all resources, but
9216 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
9217 * self tests.
9218 */
9219int bnxt_half_open_nic(struct bnxt *bp)
9220{
9221 int rc = 0;
9222
9223 rc = bnxt_alloc_mem(bp, false);
9224 if (rc) {
9225 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9226 goto half_open_err;
9227 }
9228 rc = bnxt_init_nic(bp, false);
9229 if (rc) {
9230 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9231 goto half_open_err;
9232 }
9233 return 0;
9234
9235half_open_err:
9236 bnxt_free_skbs(bp);
9237 bnxt_free_mem(bp, false);
9238 dev_close(bp->dev);
9239 return rc;
9240}
9241
9242/* rtnl_lock held, this call can only be made after a previous successful
9243 * call to bnxt_half_open_nic().
9244 */
9245void bnxt_half_close_nic(struct bnxt *bp)
9246{
9247 bnxt_hwrm_resource_free(bp, false, false);
9248 bnxt_free_skbs(bp);
9249 bnxt_free_mem(bp, false);
9250}
9251
c16d4ee0
MC
9252static void bnxt_reenable_sriov(struct bnxt *bp)
9253{
9254 if (BNXT_PF(bp)) {
9255 struct bnxt_pf_info *pf = &bp->pf;
9256 int n = pf->active_vfs;
9257
9258 if (n)
9259 bnxt_cfg_hw_sriov(bp, &n, true);
9260 }
9261}
9262
c0c050c5
MC
9263static int bnxt_open(struct net_device *dev)
9264{
9265 struct bnxt *bp = netdev_priv(dev);
25e1acd6 9266 int rc;
c0c050c5 9267
ec5d31e3
MC
9268 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9269 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9270 return -ENODEV;
9271 }
9272
9273 rc = bnxt_hwrm_if_change(bp, true);
25e1acd6 9274 if (rc)
ec5d31e3
MC
9275 return rc;
9276 rc = __bnxt_open_nic(bp, true, true);
9277 if (rc) {
25e1acd6 9278 bnxt_hwrm_if_change(bp, false);
ec5d31e3 9279 } else {
f3a6d206 9280 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
12de2ead 9281 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
f3a6d206 9282 bnxt_ulp_start(bp, 0);
12de2ead
MC
9283 bnxt_reenable_sriov(bp);
9284 }
ec5d31e3
MC
9285 }
9286 bnxt_hwmon_open(bp);
9287 }
cde49a42 9288
25e1acd6 9289 return rc;
c0c050c5
MC
9290}
9291
f9b76ebd
MC
9292static bool bnxt_drv_busy(struct bnxt *bp)
9293{
9294 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
9295 test_bit(BNXT_STATE_READ_STATS, &bp->state));
9296}
9297
b8875ca3
MC
9298static void bnxt_get_ring_stats(struct bnxt *bp,
9299 struct rtnl_link_stats64 *stats);
9300
86e953db
MC
9301static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
9302 bool link_re_init)
c0c050c5 9303{
ee5c7fb3
SP
9304 /* Close the VF-reps before closing PF */
9305 if (BNXT_PF(bp))
9306 bnxt_vf_reps_close(bp);
86e953db 9307
c0c050c5
MC
9308 /* Change device state to avoid TX queue wake up's */
9309 bnxt_tx_disable(bp);
9310
caefe526 9311 clear_bit(BNXT_STATE_OPEN, &bp->state);
4cebdcec 9312 smp_mb__after_atomic();
f9b76ebd 9313 while (bnxt_drv_busy(bp))
4cebdcec 9314 msleep(20);
c0c050c5 9315
9d8bc097 9316 /* Flush rings and and disable interrupts */
c0c050c5
MC
9317 bnxt_shutdown_nic(bp, irq_re_init);
9318
9319 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
9320
cabfb09d 9321 bnxt_debug_dev_exit(bp);
c0c050c5 9322 bnxt_disable_napi(bp);
c0c050c5
MC
9323 del_timer_sync(&bp->timer);
9324 bnxt_free_skbs(bp);
9325
b8875ca3
MC
9326 /* Save ring stats before shutdown */
9327 if (bp->bnapi)
9328 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
c0c050c5
MC
9329 if (irq_re_init) {
9330 bnxt_free_irq(bp);
9331 bnxt_del_napi(bp);
9332 }
9333 bnxt_free_mem(bp, irq_re_init);
86e953db
MC
9334}
9335
9336int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9337{
9338 int rc = 0;
9339
3bc7d4a3
MC
9340 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9341 /* If we get here, it means firmware reset is in progress
9342 * while we are trying to close. We can safely proceed with
9343 * the close because we are holding rtnl_lock(). Some firmware
9344 * messages may fail as we proceed to close. We set the
9345 * ABORT_ERR flag here so that the FW reset thread will later
9346 * abort when it gets the rtnl_lock() and sees the flag.
9347 */
9348 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
9349 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9350 }
9351
86e953db
MC
9352#ifdef CONFIG_BNXT_SRIOV
9353 if (bp->sriov_cfg) {
9354 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
9355 !bp->sriov_cfg,
9356 BNXT_SRIOV_CFG_WAIT_TMO);
9357 if (rc)
9358 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
9359 }
9360#endif
9361 __bnxt_close_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
9362 return rc;
9363}
9364
9365static int bnxt_close(struct net_device *dev)
9366{
9367 struct bnxt *bp = netdev_priv(dev);
9368
cde49a42 9369 bnxt_hwmon_close(bp);
c0c050c5 9370 bnxt_close_nic(bp, true, true);
33f7d55f 9371 bnxt_hwrm_shutdown_link(bp);
25e1acd6 9372 bnxt_hwrm_if_change(bp, false);
c0c050c5
MC
9373 return 0;
9374}
9375
0ca12be9
VV
9376static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
9377 u16 *val)
9378{
9379 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
9380 struct hwrm_port_phy_mdio_read_input req = {0};
9381 int rc;
9382
9383 if (bp->hwrm_spec_code < 0x10a00)
9384 return -EOPNOTSUPP;
9385
9386 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
9387 req.port_id = cpu_to_le16(bp->pf.port_id);
9388 req.phy_addr = phy_addr;
9389 req.reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 9390 if (mdio_phy_id_is_c45(phy_addr)) {
0ca12be9
VV
9391 req.cl45_mdio = 1;
9392 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9393 req.dev_addr = mdio_phy_id_devad(phy_addr);
9394 req.reg_addr = cpu_to_le16(reg);
9395 }
9396
9397 mutex_lock(&bp->hwrm_cmd_lock);
9398 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9399 if (!rc)
9400 *val = le16_to_cpu(resp->reg_data);
9401 mutex_unlock(&bp->hwrm_cmd_lock);
9402 return rc;
9403}
9404
9405static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
9406 u16 val)
9407{
9408 struct hwrm_port_phy_mdio_write_input req = {0};
9409
9410 if (bp->hwrm_spec_code < 0x10a00)
9411 return -EOPNOTSUPP;
9412
9413 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
9414 req.port_id = cpu_to_le16(bp->pf.port_id);
9415 req.phy_addr = phy_addr;
9416 req.reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 9417 if (mdio_phy_id_is_c45(phy_addr)) {
0ca12be9
VV
9418 req.cl45_mdio = 1;
9419 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9420 req.dev_addr = mdio_phy_id_devad(phy_addr);
9421 req.reg_addr = cpu_to_le16(reg);
9422 }
9423 req.reg_data = cpu_to_le16(val);
9424
9425 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9426}
9427
c0c050c5
MC
9428/* rtnl_lock held */
9429static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9430{
0ca12be9
VV
9431 struct mii_ioctl_data *mdio = if_mii(ifr);
9432 struct bnxt *bp = netdev_priv(dev);
9433 int rc;
9434
c0c050c5
MC
9435 switch (cmd) {
9436 case SIOCGMIIPHY:
0ca12be9
VV
9437 mdio->phy_id = bp->link_info.phy_addr;
9438
c0c050c5
MC
9439 /* fallthru */
9440 case SIOCGMIIREG: {
0ca12be9
VV
9441 u16 mii_regval = 0;
9442
c0c050c5
MC
9443 if (!netif_running(dev))
9444 return -EAGAIN;
9445
0ca12be9
VV
9446 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
9447 &mii_regval);
9448 mdio->val_out = mii_regval;
9449 return rc;
c0c050c5
MC
9450 }
9451
9452 case SIOCSMIIREG:
9453 if (!netif_running(dev))
9454 return -EAGAIN;
9455
0ca12be9
VV
9456 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
9457 mdio->val_in);
c0c050c5
MC
9458
9459 default:
9460 /* do nothing */
9461 break;
9462 }
9463 return -EOPNOTSUPP;
9464}
9465
b8875ca3
MC
9466static void bnxt_get_ring_stats(struct bnxt *bp,
9467 struct rtnl_link_stats64 *stats)
c0c050c5 9468{
b8875ca3 9469 int i;
c0c050c5 9470
c0c050c5 9471
c0c050c5
MC
9472 for (i = 0; i < bp->cp_nr_rings; i++) {
9473 struct bnxt_napi *bnapi = bp->bnapi[i];
9474 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9475 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
9476
9477 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
9478 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
9479 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
9480
9481 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
9482 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
9483 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
9484
9485 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
9486 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
9487 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
9488
9489 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
9490 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
9491 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
9492
9493 stats->rx_missed_errors +=
9494 le64_to_cpu(hw_stats->rx_discard_pkts);
9495
9496 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
9497
c0c050c5
MC
9498 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
9499 }
b8875ca3
MC
9500}
9501
9502static void bnxt_add_prev_stats(struct bnxt *bp,
9503 struct rtnl_link_stats64 *stats)
9504{
9505 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
9506
9507 stats->rx_packets += prev_stats->rx_packets;
9508 stats->tx_packets += prev_stats->tx_packets;
9509 stats->rx_bytes += prev_stats->rx_bytes;
9510 stats->tx_bytes += prev_stats->tx_bytes;
9511 stats->rx_missed_errors += prev_stats->rx_missed_errors;
9512 stats->multicast += prev_stats->multicast;
9513 stats->tx_dropped += prev_stats->tx_dropped;
9514}
9515
9516static void
9517bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
9518{
9519 struct bnxt *bp = netdev_priv(dev);
9520
9521 set_bit(BNXT_STATE_READ_STATS, &bp->state);
9522 /* Make sure bnxt_close_nic() sees that we are reading stats before
9523 * we check the BNXT_STATE_OPEN flag.
9524 */
9525 smp_mb__after_atomic();
9526 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9527 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9528 *stats = bp->net_stats_prev;
9529 return;
9530 }
9531
9532 bnxt_get_ring_stats(bp, stats);
9533 bnxt_add_prev_stats(bp, stats);
c0c050c5 9534
9947f83f
MC
9535 if (bp->flags & BNXT_FLAG_PORT_STATS) {
9536 struct rx_port_stats *rx = bp->hw_rx_port_stats;
9537 struct tx_port_stats *tx = bp->hw_tx_port_stats;
9538
9539 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
9540 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
9541 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
9542 le64_to_cpu(rx->rx_ovrsz_frames) +
9543 le64_to_cpu(rx->rx_runt_frames);
9544 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
9545 le64_to_cpu(rx->rx_jbr_frames);
9546 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
9547 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
9548 stats->tx_errors = le64_to_cpu(tx->tx_err);
9549 }
f9b76ebd 9550 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
c0c050c5
MC
9551}
9552
9553static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
9554{
9555 struct net_device *dev = bp->dev;
9556 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9557 struct netdev_hw_addr *ha;
9558 u8 *haddr;
9559 int mc_count = 0;
9560 bool update = false;
9561 int off = 0;
9562
9563 netdev_for_each_mc_addr(ha, dev) {
9564 if (mc_count >= BNXT_MAX_MC_ADDRS) {
9565 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9566 vnic->mc_list_count = 0;
9567 return false;
9568 }
9569 haddr = ha->addr;
9570 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
9571 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
9572 update = true;
9573 }
9574 off += ETH_ALEN;
9575 mc_count++;
9576 }
9577 if (mc_count)
9578 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
9579
9580 if (mc_count != vnic->mc_list_count) {
9581 vnic->mc_list_count = mc_count;
9582 update = true;
9583 }
9584 return update;
9585}
9586
9587static bool bnxt_uc_list_updated(struct bnxt *bp)
9588{
9589 struct net_device *dev = bp->dev;
9590 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9591 struct netdev_hw_addr *ha;
9592 int off = 0;
9593
9594 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
9595 return true;
9596
9597 netdev_for_each_uc_addr(ha, dev) {
9598 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
9599 return true;
9600
9601 off += ETH_ALEN;
9602 }
9603 return false;
9604}
9605
9606static void bnxt_set_rx_mode(struct net_device *dev)
9607{
9608 struct bnxt *bp = netdev_priv(dev);
268d0895 9609 struct bnxt_vnic_info *vnic;
c0c050c5
MC
9610 bool mc_update = false;
9611 bool uc_update;
268d0895 9612 u32 mask;
c0c050c5 9613
268d0895 9614 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
c0c050c5
MC
9615 return;
9616
268d0895
MC
9617 vnic = &bp->vnic_info[0];
9618 mask = vnic->rx_mask;
c0c050c5
MC
9619 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
9620 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
30e33848
MC
9621 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
9622 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
c0c050c5 9623
17c71ac3 9624 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
c0c050c5
MC
9625 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9626
9627 uc_update = bnxt_uc_list_updated(bp);
9628
30e33848
MC
9629 if (dev->flags & IFF_BROADCAST)
9630 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5
MC
9631 if (dev->flags & IFF_ALLMULTI) {
9632 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9633 vnic->mc_list_count = 0;
9634 } else {
9635 mc_update = bnxt_mc_list_updated(bp, &mask);
9636 }
9637
9638 if (mask != vnic->rx_mask || uc_update || mc_update) {
9639 vnic->rx_mask = mask;
9640
9641 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
c213eae8 9642 bnxt_queue_sp_work(bp);
c0c050c5
MC
9643 }
9644}
9645
b664f008 9646static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
9647{
9648 struct net_device *dev = bp->dev;
9649 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9650 struct netdev_hw_addr *ha;
9651 int i, off = 0, rc;
9652 bool uc_update;
9653
9654 netif_addr_lock_bh(dev);
9655 uc_update = bnxt_uc_list_updated(bp);
9656 netif_addr_unlock_bh(dev);
9657
9658 if (!uc_update)
9659 goto skip_uc;
9660
9661 mutex_lock(&bp->hwrm_cmd_lock);
9662 for (i = 1; i < vnic->uc_filter_count; i++) {
9663 struct hwrm_cfa_l2_filter_free_input req = {0};
9664
9665 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
9666 -1);
9667
9668 req.l2_filter_id = vnic->fw_l2_filter_id[i];
9669
9670 rc = _hwrm_send_message(bp, &req, sizeof(req),
9671 HWRM_CMD_TIMEOUT);
9672 }
9673 mutex_unlock(&bp->hwrm_cmd_lock);
9674
9675 vnic->uc_filter_count = 1;
9676
9677 netif_addr_lock_bh(dev);
9678 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
9679 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9680 } else {
9681 netdev_for_each_uc_addr(ha, dev) {
9682 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
9683 off += ETH_ALEN;
9684 vnic->uc_filter_count++;
9685 }
9686 }
9687 netif_addr_unlock_bh(dev);
9688
9689 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
9690 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
9691 if (rc) {
9692 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
9693 rc);
9694 vnic->uc_filter_count = i;
b664f008 9695 return rc;
c0c050c5
MC
9696 }
9697 }
9698
9699skip_uc:
9700 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
b4e30e8e
MC
9701 if (rc && vnic->mc_list_count) {
9702 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
9703 rc);
9704 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9705 vnic->mc_list_count = 0;
9706 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9707 }
c0c050c5 9708 if (rc)
b4e30e8e 9709 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
c0c050c5 9710 rc);
b664f008
MC
9711
9712 return rc;
c0c050c5
MC
9713}
9714
2773dfb2
MC
9715static bool bnxt_can_reserve_rings(struct bnxt *bp)
9716{
9717#ifdef CONFIG_BNXT_SRIOV
f1ca94de 9718 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
2773dfb2
MC
9719 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9720
9721 /* No minimum rings were provisioned by the PF. Don't
9722 * reserve rings by default when device is down.
9723 */
9724 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
9725 return true;
9726
9727 if (!netif_running(bp->dev))
9728 return false;
9729 }
9730#endif
9731 return true;
9732}
9733
8079e8f1
MC
9734/* If the chip and firmware supports RFS */
9735static bool bnxt_rfs_supported(struct bnxt *bp)
9736{
e969ae5b 9737 if (bp->flags & BNXT_FLAG_CHIP_P5) {
41136ab3 9738 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
e969ae5b 9739 return true;
41e8d798 9740 return false;
e969ae5b 9741 }
8079e8f1
MC
9742 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
9743 return true;
ae10ae74
MC
9744 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9745 return true;
8079e8f1
MC
9746 return false;
9747}
9748
9749/* If runtime conditions support RFS */
2bcfa6f6
MC
9750static bool bnxt_rfs_capable(struct bnxt *bp)
9751{
9752#ifdef CONFIG_RFS_ACCEL
8079e8f1 9753 int vnics, max_vnics, max_rss_ctxs;
2bcfa6f6 9754
41e8d798 9755 if (bp->flags & BNXT_FLAG_CHIP_P5)
ac33906c 9756 return bnxt_rfs_supported(bp);
2773dfb2 9757 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
2bcfa6f6
MC
9758 return false;
9759
9760 vnics = 1 + bp->rx_nr_rings;
8079e8f1
MC
9761 max_vnics = bnxt_get_max_func_vnics(bp);
9762 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
ae10ae74
MC
9763
9764 /* RSS contexts not a limiting factor */
9765 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9766 max_rss_ctxs = max_vnics;
8079e8f1 9767 if (vnics > max_vnics || vnics > max_rss_ctxs) {
6a1eef5b
MC
9768 if (bp->rx_nr_rings > 1)
9769 netdev_warn(bp->dev,
9770 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9771 min(max_rss_ctxs - 1, max_vnics - 1));
2bcfa6f6 9772 return false;
a2304909 9773 }
2bcfa6f6 9774
f1ca94de 9775 if (!BNXT_NEW_RM(bp))
6a1eef5b
MC
9776 return true;
9777
9778 if (vnics == bp->hw_resc.resv_vnics)
9779 return true;
9780
780baad4 9781 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
6a1eef5b
MC
9782 if (vnics <= bp->hw_resc.resv_vnics)
9783 return true;
9784
9785 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
780baad4 9786 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
6a1eef5b 9787 return false;
2bcfa6f6
MC
9788#else
9789 return false;
9790#endif
9791}
9792
c0c050c5
MC
9793static netdev_features_t bnxt_fix_features(struct net_device *dev,
9794 netdev_features_t features)
9795{
2bcfa6f6
MC
9796 struct bnxt *bp = netdev_priv(dev);
9797
a2304909 9798 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
2bcfa6f6 9799 features &= ~NETIF_F_NTUPLE;
5a9f6b23 9800
1054aee8
MC
9801 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9802 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9803
9804 if (!(features & NETIF_F_GRO))
9805 features &= ~NETIF_F_GRO_HW;
9806
9807 if (features & NETIF_F_GRO_HW)
9808 features &= ~NETIF_F_LRO;
9809
5a9f6b23
MC
9810 /* Both CTAG and STAG VLAN accelaration on the RX side have to be
9811 * turned on or off together.
9812 */
9813 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
9814 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
9815 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
9816 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9817 NETIF_F_HW_VLAN_STAG_RX);
9818 else
9819 features |= NETIF_F_HW_VLAN_CTAG_RX |
9820 NETIF_F_HW_VLAN_STAG_RX;
9821 }
cf6645f8
MC
9822#ifdef CONFIG_BNXT_SRIOV
9823 if (BNXT_VF(bp)) {
9824 if (bp->vf.vlan) {
9825 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9826 NETIF_F_HW_VLAN_STAG_RX);
9827 }
9828 }
9829#endif
c0c050c5
MC
9830 return features;
9831}
9832
9833static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
9834{
9835 struct bnxt *bp = netdev_priv(dev);
9836 u32 flags = bp->flags;
9837 u32 changes;
9838 int rc = 0;
9839 bool re_init = false;
9840 bool update_tpa = false;
9841
9842 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
1054aee8 9843 if (features & NETIF_F_GRO_HW)
c0c050c5 9844 flags |= BNXT_FLAG_GRO;
1054aee8 9845 else if (features & NETIF_F_LRO)
c0c050c5
MC
9846 flags |= BNXT_FLAG_LRO;
9847
bdbd1eb5
MC
9848 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9849 flags &= ~BNXT_FLAG_TPA;
9850
c0c050c5
MC
9851 if (features & NETIF_F_HW_VLAN_CTAG_RX)
9852 flags |= BNXT_FLAG_STRIP_VLAN;
9853
9854 if (features & NETIF_F_NTUPLE)
9855 flags |= BNXT_FLAG_RFS;
9856
9857 changes = flags ^ bp->flags;
9858 if (changes & BNXT_FLAG_TPA) {
9859 update_tpa = true;
9860 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
f45b7b78
MC
9861 (flags & BNXT_FLAG_TPA) == 0 ||
9862 (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
9863 re_init = true;
9864 }
9865
9866 if (changes & ~BNXT_FLAG_TPA)
9867 re_init = true;
9868
9869 if (flags != bp->flags) {
9870 u32 old_flags = bp->flags;
9871
2bcfa6f6 9872 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
f45b7b78 9873 bp->flags = flags;
c0c050c5
MC
9874 if (update_tpa)
9875 bnxt_set_ring_params(bp);
9876 return rc;
9877 }
9878
9879 if (re_init) {
9880 bnxt_close_nic(bp, false, false);
f45b7b78 9881 bp->flags = flags;
c0c050c5
MC
9882 if (update_tpa)
9883 bnxt_set_ring_params(bp);
9884
9885 return bnxt_open_nic(bp, false, false);
9886 }
9887 if (update_tpa) {
f45b7b78 9888 bp->flags = flags;
c0c050c5
MC
9889 rc = bnxt_set_tpa(bp,
9890 (flags & BNXT_FLAG_TPA) ?
9891 true : false);
9892 if (rc)
9893 bp->flags = old_flags;
9894 }
9895 }
9896 return rc;
9897}
9898
ffd77621
MC
9899static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9900 u32 ring_id, u32 *prod, u32 *cons)
9901{
9902 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9903 struct hwrm_dbg_ring_info_get_input req = {0};
9904 int rc;
9905
9906 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9907 req.ring_type = ring_type;
9908 req.fw_ring_id = cpu_to_le32(ring_id);
9909 mutex_lock(&bp->hwrm_cmd_lock);
9910 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9911 if (!rc) {
9912 *prod = le32_to_cpu(resp->producer_index);
9913 *cons = le32_to_cpu(resp->consumer_index);
9914 }
9915 mutex_unlock(&bp->hwrm_cmd_lock);
9916 return rc;
9917}
9918
9f554590
MC
9919static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9920{
b6ab4b01 9921 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9f554590
MC
9922 int i = bnapi->index;
9923
3b2b7d9d
MC
9924 if (!txr)
9925 return;
9926
9f554590
MC
9927 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9928 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9929 txr->tx_cons);
9930}
9931
9932static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9933{
b6ab4b01 9934 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9f554590
MC
9935 int i = bnapi->index;
9936
3b2b7d9d
MC
9937 if (!rxr)
9938 return;
9939
9f554590
MC
9940 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9941 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9942 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9943 rxr->rx_sw_agg_prod);
9944}
9945
9946static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9947{
9948 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9949 int i = bnapi->index;
9950
9951 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9952 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9953}
9954
c0c050c5
MC
9955static void bnxt_dbg_dump_states(struct bnxt *bp)
9956{
9957 int i;
9958 struct bnxt_napi *bnapi;
c0c050c5
MC
9959
9960 for (i = 0; i < bp->cp_nr_rings; i++) {
9961 bnapi = bp->bnapi[i];
c0c050c5 9962 if (netif_msg_drv(bp)) {
9f554590
MC
9963 bnxt_dump_tx_sw_state(bnapi);
9964 bnxt_dump_rx_sw_state(bnapi);
9965 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
9966 }
9967 }
9968}
9969
6988bd92 9970static void bnxt_reset_task(struct bnxt *bp, bool silent)
c0c050c5 9971{
6988bd92
MC
9972 if (!silent)
9973 bnxt_dbg_dump_states(bp);
028de140 9974 if (netif_running(bp->dev)) {
b386cd36
MC
9975 int rc;
9976
aa46dfff
VV
9977 if (silent) {
9978 bnxt_close_nic(bp, false, false);
9979 bnxt_open_nic(bp, false, false);
9980 } else {
b386cd36 9981 bnxt_ulp_stop(bp);
aa46dfff
VV
9982 bnxt_close_nic(bp, true, false);
9983 rc = bnxt_open_nic(bp, true, false);
9984 bnxt_ulp_start(bp, rc);
9985 }
028de140 9986 }
c0c050c5
MC
9987}
9988
0290bd29 9989static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
c0c050c5
MC
9990{
9991 struct bnxt *bp = netdev_priv(dev);
9992
9993 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9994 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 9995 bnxt_queue_sp_work(bp);
c0c050c5
MC
9996}
9997
acfb50e4
VV
9998static void bnxt_fw_health_check(struct bnxt *bp)
9999{
10000 struct bnxt_fw_health *fw_health = bp->fw_health;
10001 u32 val;
10002
0797c10d 10003 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
acfb50e4
VV
10004 return;
10005
10006 if (fw_health->tmr_counter) {
10007 fw_health->tmr_counter--;
10008 return;
10009 }
10010
10011 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10012 if (val == fw_health->last_fw_heartbeat)
10013 goto fw_reset;
10014
10015 fw_health->last_fw_heartbeat = val;
10016
10017 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10018 if (val != fw_health->last_fw_reset_cnt)
10019 goto fw_reset;
10020
10021 fw_health->tmr_counter = fw_health->tmr_multiplier;
10022 return;
10023
10024fw_reset:
10025 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
10026 bnxt_queue_sp_work(bp);
10027}
10028
e99e88a9 10029static void bnxt_timer(struct timer_list *t)
c0c050c5 10030{
e99e88a9 10031 struct bnxt *bp = from_timer(bp, t, timer);
c0c050c5
MC
10032 struct net_device *dev = bp->dev;
10033
10034 if (!netif_running(dev))
10035 return;
10036
10037 if (atomic_read(&bp->intr_sem) != 0)
10038 goto bnxt_restart_timer;
10039
acfb50e4
VV
10040 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
10041 bnxt_fw_health_check(bp);
10042
adcc331e
MC
10043 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
10044 bp->stats_coal_ticks) {
3bdf56c4 10045 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
c213eae8 10046 bnxt_queue_sp_work(bp);
3bdf56c4 10047 }
5a84acbe
SP
10048
10049 if (bnxt_tc_flower_enabled(bp)) {
10050 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10051 bnxt_queue_sp_work(bp);
10052 }
a1ef4a79 10053
87d67f59
PC
10054#ifdef CONFIG_RFS_ACCEL
10055 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
10056 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
10057 bnxt_queue_sp_work(bp);
10058 }
10059#endif /*CONFIG_RFS_ACCEL*/
10060
a1ef4a79
MC
10061 if (bp->link_info.phy_retry) {
10062 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
acda6180 10063 bp->link_info.phy_retry = false;
a1ef4a79
MC
10064 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10065 } else {
10066 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10067 bnxt_queue_sp_work(bp);
10068 }
10069 }
ffd77621 10070
5313845f
MC
10071 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
10072 netif_carrier_ok(dev)) {
ffd77621
MC
10073 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10074 bnxt_queue_sp_work(bp);
10075 }
c0c050c5
MC
10076bnxt_restart_timer:
10077 mod_timer(&bp->timer, jiffies + bp->current_interval);
10078}
10079
a551ee94 10080static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6988bd92 10081{
a551ee94
MC
10082 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10083 * set. If the device is being closed, bnxt_close() may be holding
6988bd92
MC
10084 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
10085 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
10086 */
10087 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10088 rtnl_lock();
a551ee94
MC
10089}
10090
10091static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10092{
6988bd92
MC
10093 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10094 rtnl_unlock();
10095}
10096
a551ee94
MC
10097/* Only called from bnxt_sp_task() */
10098static void bnxt_reset(struct bnxt *bp, bool silent)
10099{
10100 bnxt_rtnl_lock_sp(bp);
10101 if (test_bit(BNXT_STATE_OPEN, &bp->state))
10102 bnxt_reset_task(bp, silent);
10103 bnxt_rtnl_unlock_sp(bp);
10104}
10105
230d1f0d
MC
10106static void bnxt_fw_reset_close(struct bnxt *bp)
10107{
f3a6d206 10108 bnxt_ulp_stop(bp);
d4073028
VV
10109 /* When firmware is fatal state, disable PCI device to prevent
10110 * any potential bad DMAs before freeing kernel memory.
10111 */
10112 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10113 pci_disable_device(bp->pdev);
230d1f0d 10114 __bnxt_close_nic(bp, true, false);
230d1f0d
MC
10115 bnxt_clear_int_mode(bp);
10116 bnxt_hwrm_func_drv_unrgtr(bp);
d4073028
VV
10117 if (pci_is_enabled(bp->pdev))
10118 pci_disable_device(bp->pdev);
230d1f0d
MC
10119 bnxt_free_ctx_mem(bp);
10120 kfree(bp->ctx);
10121 bp->ctx = NULL;
10122}
10123
acfb50e4
VV
10124static bool is_bnxt_fw_ok(struct bnxt *bp)
10125{
10126 struct bnxt_fw_health *fw_health = bp->fw_health;
10127 bool no_heartbeat = false, has_reset = false;
10128 u32 val;
10129
10130 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10131 if (val == fw_health->last_fw_heartbeat)
10132 no_heartbeat = true;
10133
10134 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10135 if (val != fw_health->last_fw_reset_cnt)
10136 has_reset = true;
10137
10138 if (!no_heartbeat && has_reset)
10139 return true;
10140
10141 return false;
10142}
10143
d1db9e16
MC
10144/* rtnl_lock is acquired before calling this function */
10145static void bnxt_force_fw_reset(struct bnxt *bp)
10146{
10147 struct bnxt_fw_health *fw_health = bp->fw_health;
10148 u32 wait_dsecs;
10149
10150 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
10151 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10152 return;
10153
10154 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10155 bnxt_fw_reset_close(bp);
10156 wait_dsecs = fw_health->master_func_wait_dsecs;
10157 if (fw_health->master) {
10158 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
10159 wait_dsecs = 0;
10160 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10161 } else {
10162 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
10163 wait_dsecs = fw_health->normal_func_wait_dsecs;
10164 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10165 }
4037eb71
VV
10166
10167 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
d1db9e16
MC
10168 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
10169 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10170}
10171
10172void bnxt_fw_exception(struct bnxt *bp)
10173{
a2b31e27 10174 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
d1db9e16
MC
10175 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10176 bnxt_rtnl_lock_sp(bp);
10177 bnxt_force_fw_reset(bp);
10178 bnxt_rtnl_unlock_sp(bp);
10179}
10180
e72cb7d6
MC
10181/* Returns the number of registered VFs, or 1 if VF configuration is pending, or
10182 * < 0 on error.
10183 */
10184static int bnxt_get_registered_vfs(struct bnxt *bp)
230d1f0d 10185{
e72cb7d6 10186#ifdef CONFIG_BNXT_SRIOV
230d1f0d
MC
10187 int rc;
10188
e72cb7d6
MC
10189 if (!BNXT_PF(bp))
10190 return 0;
10191
10192 rc = bnxt_hwrm_func_qcfg(bp);
10193 if (rc) {
10194 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
10195 return rc;
10196 }
10197 if (bp->pf.registered_vfs)
10198 return bp->pf.registered_vfs;
10199 if (bp->sriov_cfg)
10200 return 1;
10201#endif
10202 return 0;
10203}
10204
10205void bnxt_fw_reset(struct bnxt *bp)
10206{
230d1f0d
MC
10207 bnxt_rtnl_lock_sp(bp);
10208 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
10209 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
4037eb71 10210 int n = 0, tmo;
e72cb7d6 10211
230d1f0d 10212 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
e72cb7d6
MC
10213 if (bp->pf.active_vfs &&
10214 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10215 n = bnxt_get_registered_vfs(bp);
10216 if (n < 0) {
10217 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
10218 n);
10219 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10220 dev_close(bp->dev);
10221 goto fw_reset_exit;
10222 } else if (n > 0) {
10223 u16 vf_tmo_dsecs = n * 10;
10224
10225 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
10226 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
10227 bp->fw_reset_state =
10228 BNXT_FW_RESET_STATE_POLL_VF;
10229 bnxt_queue_fw_reset_work(bp, HZ / 10);
10230 goto fw_reset_exit;
230d1f0d
MC
10231 }
10232 bnxt_fw_reset_close(bp);
4037eb71
VV
10233 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10234 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10235 tmo = HZ / 10;
10236 } else {
10237 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10238 tmo = bp->fw_reset_min_dsecs * HZ / 10;
10239 }
10240 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d
MC
10241 }
10242fw_reset_exit:
10243 bnxt_rtnl_unlock_sp(bp);
10244}
10245
ffd77621
MC
10246static void bnxt_chk_missed_irq(struct bnxt *bp)
10247{
10248 int i;
10249
10250 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10251 return;
10252
10253 for (i = 0; i < bp->cp_nr_rings; i++) {
10254 struct bnxt_napi *bnapi = bp->bnapi[i];
10255 struct bnxt_cp_ring_info *cpr;
10256 u32 fw_ring_id;
10257 int j;
10258
10259 if (!bnapi)
10260 continue;
10261
10262 cpr = &bnapi->cp_ring;
10263 for (j = 0; j < 2; j++) {
10264 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
10265 u32 val[2];
10266
10267 if (!cpr2 || cpr2->has_more_work ||
10268 !bnxt_has_work(bp, cpr2))
10269 continue;
10270
10271 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
10272 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
10273 continue;
10274 }
10275 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
10276 bnxt_dbg_hwrm_ring_info_get(bp,
10277 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
10278 fw_ring_id, &val[0], &val[1]);
83eb5c5c 10279 cpr->missed_irqs++;
ffd77621
MC
10280 }
10281 }
10282}
10283
c0c050c5
MC
10284static void bnxt_cfg_ntp_filters(struct bnxt *);
10285
8119e49b
MC
10286static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
10287{
10288 struct bnxt_link_info *link_info = &bp->link_info;
10289
10290 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
10291 link_info->autoneg = BNXT_AUTONEG_SPEED;
10292 if (bp->hwrm_spec_code >= 0x10201) {
10293 if (link_info->auto_pause_setting &
10294 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10295 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10296 } else {
10297 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10298 }
10299 link_info->advertising = link_info->auto_link_speeds;
10300 } else {
10301 link_info->req_link_speed = link_info->force_link_speed;
10302 link_info->req_duplex = link_info->duplex_setting;
10303 }
10304 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10305 link_info->req_flow_ctrl =
10306 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10307 else
10308 link_info->req_flow_ctrl = link_info->force_pause_setting;
10309}
10310
c0c050c5
MC
10311static void bnxt_sp_task(struct work_struct *work)
10312{
10313 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
c0c050c5 10314
4cebdcec
MC
10315 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10316 smp_mb__after_atomic();
10317 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10318 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5 10319 return;
4cebdcec 10320 }
c0c050c5
MC
10321
10322 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
10323 bnxt_cfg_rx_mode(bp);
10324
10325 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
10326 bnxt_cfg_ntp_filters(bp);
c0c050c5
MC
10327 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
10328 bnxt_hwrm_exec_fwd_req(bp);
10329 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10330 bnxt_hwrm_tunnel_dst_port_alloc(
10331 bp, bp->vxlan_port,
10332 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10333 }
10334 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10335 bnxt_hwrm_tunnel_dst_port_free(
10336 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10337 }
7cdd5fc3
AD
10338 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10339 bnxt_hwrm_tunnel_dst_port_alloc(
10340 bp, bp->nge_port,
10341 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10342 }
10343 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10344 bnxt_hwrm_tunnel_dst_port_free(
10345 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10346 }
00db3cba 10347 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
3bdf56c4 10348 bnxt_hwrm_port_qstats(bp);
00db3cba 10349 bnxt_hwrm_port_qstats_ext(bp);
55e4398d 10350 bnxt_hwrm_pcie_qstats(bp);
00db3cba 10351 }
3bdf56c4 10352
0eaa24b9 10353 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
e2dc9b6e 10354 int rc;
0eaa24b9 10355
e2dc9b6e 10356 mutex_lock(&bp->link_lock);
0eaa24b9
MC
10357 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
10358 &bp->sp_event))
10359 bnxt_hwrm_phy_qcaps(bp);
10360
b1613e78
MC
10361 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
10362 &bp->sp_event))
10363 bnxt_init_ethtool_link_settings(bp);
10364
e2dc9b6e
MC
10365 rc = bnxt_update_link(bp, true);
10366 mutex_unlock(&bp->link_lock);
0eaa24b9
MC
10367 if (rc)
10368 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
10369 rc);
10370 }
a1ef4a79
MC
10371 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
10372 int rc;
10373
10374 mutex_lock(&bp->link_lock);
10375 rc = bnxt_update_phy_setting(bp);
10376 mutex_unlock(&bp->link_lock);
10377 if (rc) {
10378 netdev_warn(bp->dev, "update phy settings retry failed\n");
10379 } else {
10380 bp->link_info.phy_retry = false;
10381 netdev_info(bp->dev, "update phy settings retry succeeded\n");
10382 }
10383 }
90c694bb 10384 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
e2dc9b6e
MC
10385 mutex_lock(&bp->link_lock);
10386 bnxt_get_port_module_status(bp);
10387 mutex_unlock(&bp->link_lock);
90c694bb 10388 }
5a84acbe
SP
10389
10390 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
10391 bnxt_tc_flow_stats_work(bp);
10392
ffd77621
MC
10393 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
10394 bnxt_chk_missed_irq(bp);
10395
e2dc9b6e
MC
10396 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
10397 * must be the last functions to be called before exiting.
10398 */
6988bd92
MC
10399 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
10400 bnxt_reset(bp, false);
4cebdcec 10401
fc0f1929
MC
10402 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
10403 bnxt_reset(bp, true);
10404
657a33c8
VV
10405 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
10406 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
10407
acfb50e4
VV
10408 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
10409 if (!is_bnxt_fw_ok(bp))
10410 bnxt_devlink_health_report(bp,
10411 BNXT_FW_EXCEPTION_SP_EVENT);
10412 }
10413
4cebdcec
MC
10414 smp_mb__before_atomic();
10415 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5
MC
10416}
10417
d1e7925e 10418/* Under rtnl_lock */
98fdbe73
MC
10419int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
10420 int tx_xdp)
d1e7925e
MC
10421{
10422 int max_rx, max_tx, tx_sets = 1;
780baad4 10423 int tx_rings_needed, stats;
8f23d638 10424 int rx_rings = rx;
6fc2ffdf 10425 int cp, vnics, rc;
d1e7925e 10426
d1e7925e
MC
10427 if (tcs)
10428 tx_sets = tcs;
10429
10430 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
10431 if (rc)
10432 return rc;
10433
10434 if (max_rx < rx)
10435 return -ENOMEM;
10436
5f449249 10437 tx_rings_needed = tx * tx_sets + tx_xdp;
d1e7925e
MC
10438 if (max_tx < tx_rings_needed)
10439 return -ENOMEM;
10440
6fc2ffdf 10441 vnics = 1;
9b3d15e6 10442 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
6fc2ffdf
EW
10443 vnics += rx_rings;
10444
8f23d638
MC
10445 if (bp->flags & BNXT_FLAG_AGG_RINGS)
10446 rx_rings <<= 1;
10447 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
780baad4
VV
10448 stats = cp;
10449 if (BNXT_NEW_RM(bp)) {
11c3ec7b 10450 cp += bnxt_get_ulp_msix_num(bp);
780baad4
VV
10451 stats += bnxt_get_ulp_stat_ctxs(bp);
10452 }
6fc2ffdf 10453 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
780baad4 10454 stats, vnics);
d1e7925e
MC
10455}
10456
17086399
SP
10457static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
10458{
10459 if (bp->bar2) {
10460 pci_iounmap(pdev, bp->bar2);
10461 bp->bar2 = NULL;
10462 }
10463
10464 if (bp->bar1) {
10465 pci_iounmap(pdev, bp->bar1);
10466 bp->bar1 = NULL;
10467 }
10468
10469 if (bp->bar0) {
10470 pci_iounmap(pdev, bp->bar0);
10471 bp->bar0 = NULL;
10472 }
10473}
10474
10475static void bnxt_cleanup_pci(struct bnxt *bp)
10476{
10477 bnxt_unmap_bars(bp, bp->pdev);
10478 pci_release_regions(bp->pdev);
f6824308
VV
10479 if (pci_is_enabled(bp->pdev))
10480 pci_disable_device(bp->pdev);
17086399
SP
10481}
10482
18775aa8
MC
10483static void bnxt_init_dflt_coal(struct bnxt *bp)
10484{
10485 struct bnxt_coal *coal;
10486
10487 /* Tick values in micro seconds.
10488 * 1 coal_buf x bufs_per_record = 1 completion record.
10489 */
10490 coal = &bp->rx_coal;
0c2ff8d7 10491 coal->coal_ticks = 10;
18775aa8
MC
10492 coal->coal_bufs = 30;
10493 coal->coal_ticks_irq = 1;
10494 coal->coal_bufs_irq = 2;
05abe4dd 10495 coal->idle_thresh = 50;
18775aa8
MC
10496 coal->bufs_per_record = 2;
10497 coal->budget = 64; /* NAPI budget */
10498
10499 coal = &bp->tx_coal;
10500 coal->coal_ticks = 28;
10501 coal->coal_bufs = 30;
10502 coal->coal_ticks_irq = 2;
10503 coal->coal_bufs_irq = 2;
10504 coal->bufs_per_record = 1;
10505
10506 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
10507}
10508
8280b38e
VV
10509static void bnxt_alloc_fw_health(struct bnxt *bp)
10510{
10511 if (bp->fw_health)
10512 return;
10513
10514 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
10515 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10516 return;
10517
10518 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
10519 if (!bp->fw_health) {
10520 netdev_warn(bp->dev, "Failed to allocate fw_health\n");
10521 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
10522 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10523 }
10524}
10525
7c380918
MC
10526static int bnxt_fw_init_one_p1(struct bnxt *bp)
10527{
10528 int rc;
10529
10530 bp->fw_cap = 0;
10531 rc = bnxt_hwrm_ver_get(bp);
10532 if (rc)
10533 return rc;
10534
10535 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10536 rc = bnxt_alloc_kong_hwrm_resources(bp);
10537 if (rc)
10538 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10539 }
10540
10541 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10542 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10543 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10544 if (rc)
10545 return rc;
10546 }
10547 rc = bnxt_hwrm_func_reset(bp);
10548 if (rc)
10549 return -ENODEV;
10550
10551 bnxt_hwrm_fw_set_time(bp);
10552 return 0;
10553}
10554
10555static int bnxt_fw_init_one_p2(struct bnxt *bp)
10556{
10557 int rc;
10558
10559 /* Get the MAX capabilities for this function */
10560 rc = bnxt_hwrm_func_qcaps(bp);
10561 if (rc) {
10562 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10563 rc);
10564 return -ENODEV;
10565 }
10566
10567 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
10568 if (rc)
10569 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
10570 rc);
10571
8280b38e 10572 bnxt_alloc_fw_health(bp);
07f83d72
MC
10573 rc = bnxt_hwrm_error_recovery_qcfg(bp);
10574 if (rc)
10575 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
10576 rc);
10577
2e882468 10578 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
7c380918
MC
10579 if (rc)
10580 return -ENODEV;
10581
10582 bnxt_hwrm_func_qcfg(bp);
10583 bnxt_hwrm_vnic_qcaps(bp);
10584 bnxt_hwrm_port_led_qcaps(bp);
10585 bnxt_ethtool_init(bp);
10586 bnxt_dcb_init(bp);
10587 return 0;
10588}
10589
ba642ab7
MC
10590static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
10591{
10592 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
10593 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10594 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10595 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10596 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
c66c06c5 10597 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
ba642ab7
MC
10598 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10599 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10600 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10601 }
10602}
10603
10604static void bnxt_set_dflt_rfs(struct bnxt *bp)
10605{
10606 struct net_device *dev = bp->dev;
10607
10608 dev->hw_features &= ~NETIF_F_NTUPLE;
10609 dev->features &= ~NETIF_F_NTUPLE;
10610 bp->flags &= ~BNXT_FLAG_RFS;
10611 if (bnxt_rfs_supported(bp)) {
10612 dev->hw_features |= NETIF_F_NTUPLE;
10613 if (bnxt_rfs_capable(bp)) {
10614 bp->flags |= BNXT_FLAG_RFS;
10615 dev->features |= NETIF_F_NTUPLE;
10616 }
10617 }
10618}
10619
10620static void bnxt_fw_init_one_p3(struct bnxt *bp)
10621{
10622 struct pci_dev *pdev = bp->pdev;
10623
10624 bnxt_set_dflt_rss_hash_type(bp);
10625 bnxt_set_dflt_rfs(bp);
10626
10627 bnxt_get_wol_settings(bp);
10628 if (bp->flags & BNXT_FLAG_WOL_CAP)
10629 device_set_wakeup_enable(&pdev->dev, bp->wol);
10630 else
10631 device_set_wakeup_capable(&pdev->dev, false);
10632
10633 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10634 bnxt_hwrm_coal_params_qcaps(bp);
10635}
10636
ec5d31e3
MC
10637static int bnxt_fw_init_one(struct bnxt *bp)
10638{
10639 int rc;
10640
10641 rc = bnxt_fw_init_one_p1(bp);
10642 if (rc) {
10643 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
10644 return rc;
10645 }
10646 rc = bnxt_fw_init_one_p2(bp);
10647 if (rc) {
10648 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
10649 return rc;
10650 }
10651 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
10652 if (rc)
10653 return rc;
937f188c
VV
10654
10655 /* In case fw capabilities have changed, destroy the unneeded
10656 * reporters and create newly capable ones.
10657 */
10658 bnxt_dl_fw_reporters_destroy(bp, false);
10659 bnxt_dl_fw_reporters_create(bp);
ec5d31e3
MC
10660 bnxt_fw_init_one_p3(bp);
10661 return 0;
10662}
10663
cbb51067
MC
10664static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
10665{
10666 struct bnxt_fw_health *fw_health = bp->fw_health;
10667 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
10668 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
10669 u32 reg_type, reg_off, delay_msecs;
10670
10671 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
10672 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
10673 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
10674 switch (reg_type) {
10675 case BNXT_FW_HEALTH_REG_TYPE_CFG:
10676 pci_write_config_dword(bp->pdev, reg_off, val);
10677 break;
10678 case BNXT_FW_HEALTH_REG_TYPE_GRC:
10679 writel(reg_off & BNXT_GRC_BASE_MASK,
10680 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
10681 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
10682 /* fall through */
10683 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
10684 writel(val, bp->bar0 + reg_off);
10685 break;
10686 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
10687 writel(val, bp->bar1 + reg_off);
10688 break;
10689 }
10690 if (delay_msecs) {
10691 pci_read_config_dword(bp->pdev, 0, &val);
10692 msleep(delay_msecs);
10693 }
10694}
10695
10696static void bnxt_reset_all(struct bnxt *bp)
10697{
10698 struct bnxt_fw_health *fw_health = bp->fw_health;
e07ab202
VV
10699 int i, rc;
10700
10701 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10702#ifdef CONFIG_TEE_BNXT_FW
10703 rc = tee_bnxt_fw_load();
10704 if (rc)
10705 netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
10706 bp->fw_reset_timestamp = jiffies;
10707#endif
10708 return;
10709 }
cbb51067
MC
10710
10711 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
10712 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
10713 bnxt_fw_reset_writel(bp, i);
10714 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
10715 struct hwrm_fw_reset_input req = {0};
cbb51067
MC
10716
10717 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
10718 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
10719 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
10720 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
10721 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
10722 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10723 if (rc)
10724 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
10725 }
10726 bp->fw_reset_timestamp = jiffies;
10727}
10728
230d1f0d
MC
10729static void bnxt_fw_reset_task(struct work_struct *work)
10730{
10731 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
10732 int rc;
10733
10734 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10735 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
10736 return;
10737 }
10738
10739 switch (bp->fw_reset_state) {
e72cb7d6
MC
10740 case BNXT_FW_RESET_STATE_POLL_VF: {
10741 int n = bnxt_get_registered_vfs(bp);
4037eb71 10742 int tmo;
e72cb7d6
MC
10743
10744 if (n < 0) {
230d1f0d 10745 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
e72cb7d6 10746 n, jiffies_to_msecs(jiffies -
230d1f0d
MC
10747 bp->fw_reset_timestamp));
10748 goto fw_reset_abort;
e72cb7d6 10749 } else if (n > 0) {
230d1f0d
MC
10750 if (time_after(jiffies, bp->fw_reset_timestamp +
10751 (bp->fw_reset_max_dsecs * HZ / 10))) {
10752 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10753 bp->fw_reset_state = 0;
e72cb7d6
MC
10754 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
10755 n);
230d1f0d
MC
10756 return;
10757 }
10758 bnxt_queue_fw_reset_work(bp, HZ / 10);
10759 return;
10760 }
10761 bp->fw_reset_timestamp = jiffies;
10762 rtnl_lock();
10763 bnxt_fw_reset_close(bp);
4037eb71
VV
10764 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10765 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10766 tmo = HZ / 10;
10767 } else {
10768 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10769 tmo = bp->fw_reset_min_dsecs * HZ / 10;
10770 }
230d1f0d 10771 rtnl_unlock();
4037eb71 10772 bnxt_queue_fw_reset_work(bp, tmo);
230d1f0d 10773 return;
e72cb7d6 10774 }
4037eb71
VV
10775 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
10776 u32 val;
10777
10778 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
10779 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
10780 !time_after(jiffies, bp->fw_reset_timestamp +
10781 (bp->fw_reset_max_dsecs * HZ / 10))) {
10782 bnxt_queue_fw_reset_work(bp, HZ / 5);
10783 return;
10784 }
10785
10786 if (!bp->fw_health->master) {
10787 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
10788
10789 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10790 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10791 return;
10792 }
10793 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10794 }
10795 /* fall through */
c6a9e7aa 10796 case BNXT_FW_RESET_STATE_RESET_FW:
cbb51067
MC
10797 bnxt_reset_all(bp);
10798 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
c6a9e7aa 10799 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
cbb51067 10800 return;
230d1f0d 10801 case BNXT_FW_RESET_STATE_ENABLE_DEV:
0797c10d 10802 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
d1db9e16
MC
10803 u32 val;
10804
10805 val = bnxt_fw_health_readl(bp,
10806 BNXT_FW_RESET_INPROG_REG);
10807 if (val)
10808 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
10809 val);
10810 }
b4fff207 10811 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
230d1f0d
MC
10812 if (pci_enable_device(bp->pdev)) {
10813 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
10814 goto fw_reset_abort;
10815 }
10816 pci_set_master(bp->pdev);
10817 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
10818 /* fall through */
10819 case BNXT_FW_RESET_STATE_POLL_FW:
10820 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
10821 rc = __bnxt_hwrm_ver_get(bp, true);
10822 if (rc) {
10823 if (time_after(jiffies, bp->fw_reset_timestamp +
10824 (bp->fw_reset_max_dsecs * HZ / 10))) {
10825 netdev_err(bp->dev, "Firmware reset aborted\n");
10826 goto fw_reset_abort;
10827 }
10828 bnxt_queue_fw_reset_work(bp, HZ / 5);
10829 return;
10830 }
10831 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10832 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
10833 /* fall through */
10834 case BNXT_FW_RESET_STATE_OPENING:
10835 while (!rtnl_trylock()) {
10836 bnxt_queue_fw_reset_work(bp, HZ / 10);
10837 return;
10838 }
10839 rc = bnxt_open(bp->dev);
10840 if (rc) {
10841 netdev_err(bp->dev, "bnxt_open_nic() failed\n");
10842 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10843 dev_close(bp->dev);
10844 }
230d1f0d
MC
10845
10846 bp->fw_reset_state = 0;
10847 /* Make sure fw_reset_state is 0 before clearing the flag */
10848 smp_mb__before_atomic();
10849 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
f3a6d206 10850 bnxt_ulp_start(bp, rc);
12de2ead
MC
10851 if (!rc)
10852 bnxt_reenable_sriov(bp);
737d7a6c 10853 bnxt_dl_health_recovery_done(bp);
e4e38237 10854 bnxt_dl_health_status_update(bp, true);
f3a6d206 10855 rtnl_unlock();
230d1f0d
MC
10856 break;
10857 }
10858 return;
10859
10860fw_reset_abort:
10861 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
e4e38237
VV
10862 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
10863 bnxt_dl_health_status_update(bp, false);
230d1f0d
MC
10864 bp->fw_reset_state = 0;
10865 rtnl_lock();
10866 dev_close(bp->dev);
10867 rtnl_unlock();
10868}
10869
c0c050c5
MC
10870static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
10871{
10872 int rc;
10873 struct bnxt *bp = netdev_priv(dev);
10874
10875 SET_NETDEV_DEV(dev, &pdev->dev);
10876
10877 /* enable device (incl. PCI PM wakeup), and bus-mastering */
10878 rc = pci_enable_device(pdev);
10879 if (rc) {
10880 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
10881 goto init_err;
10882 }
10883
10884 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10885 dev_err(&pdev->dev,
10886 "Cannot find PCI device base address, aborting\n");
10887 rc = -ENODEV;
10888 goto init_err_disable;
10889 }
10890
10891 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10892 if (rc) {
10893 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
10894 goto init_err_disable;
10895 }
10896
10897 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
10898 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
10899 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
10900 goto init_err_disable;
10901 }
10902
10903 pci_set_master(pdev);
10904
10905 bp->dev = dev;
10906 bp->pdev = pdev;
10907
10908 bp->bar0 = pci_ioremap_bar(pdev, 0);
10909 if (!bp->bar0) {
10910 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
10911 rc = -ENOMEM;
10912 goto init_err_release;
10913 }
10914
10915 bp->bar1 = pci_ioremap_bar(pdev, 2);
10916 if (!bp->bar1) {
10917 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
10918 rc = -ENOMEM;
10919 goto init_err_release;
10920 }
10921
10922 bp->bar2 = pci_ioremap_bar(pdev, 4);
10923 if (!bp->bar2) {
10924 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
10925 rc = -ENOMEM;
10926 goto init_err_release;
10927 }
10928
6316ea6d
SB
10929 pci_enable_pcie_error_reporting(pdev);
10930
c0c050c5 10931 INIT_WORK(&bp->sp_task, bnxt_sp_task);
230d1f0d 10932 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
c0c050c5
MC
10933
10934 spin_lock_init(&bp->ntp_fltr_lock);
697197e5
MC
10935#if BITS_PER_LONG == 32
10936 spin_lock_init(&bp->db_lock);
10937#endif
c0c050c5
MC
10938
10939 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
10940 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
10941
18775aa8 10942 bnxt_init_dflt_coal(bp);
51f30785 10943
e99e88a9 10944 timer_setup(&bp->timer, bnxt_timer, 0);
c0c050c5
MC
10945 bp->current_interval = BNXT_TIMER_INTERVAL;
10946
caefe526 10947 clear_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
10948 return 0;
10949
10950init_err_release:
17086399 10951 bnxt_unmap_bars(bp, pdev);
c0c050c5
MC
10952 pci_release_regions(pdev);
10953
10954init_err_disable:
10955 pci_disable_device(pdev);
10956
10957init_err:
10958 return rc;
10959}
10960
10961/* rtnl_lock held */
10962static int bnxt_change_mac_addr(struct net_device *dev, void *p)
10963{
10964 struct sockaddr *addr = p;
1fc2cfd0
JH
10965 struct bnxt *bp = netdev_priv(dev);
10966 int rc = 0;
c0c050c5
MC
10967
10968 if (!is_valid_ether_addr(addr->sa_data))
10969 return -EADDRNOTAVAIL;
10970
c1a7bdff
MC
10971 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
10972 return 0;
10973
28ea334b 10974 rc = bnxt_approve_mac(bp, addr->sa_data, true);
84c33dd3
MC
10975 if (rc)
10976 return rc;
bdd4347b 10977
c0c050c5 10978 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1fc2cfd0
JH
10979 if (netif_running(dev)) {
10980 bnxt_close_nic(bp, false, false);
10981 rc = bnxt_open_nic(bp, false, false);
10982 }
c0c050c5 10983
1fc2cfd0 10984 return rc;
c0c050c5
MC
10985}
10986
10987/* rtnl_lock held */
10988static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
10989{
10990 struct bnxt *bp = netdev_priv(dev);
10991
c0c050c5 10992 if (netif_running(dev))
a9b952d2 10993 bnxt_close_nic(bp, true, false);
c0c050c5
MC
10994
10995 dev->mtu = new_mtu;
10996 bnxt_set_ring_params(bp);
10997
10998 if (netif_running(dev))
a9b952d2 10999 return bnxt_open_nic(bp, true, false);
c0c050c5
MC
11000
11001 return 0;
11002}
11003
c5e3deb8 11004int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
c0c050c5
MC
11005{
11006 struct bnxt *bp = netdev_priv(dev);
3ffb6a39 11007 bool sh = false;
d1e7925e 11008 int rc;
16e5cc64 11009
c0c050c5 11010 if (tc > bp->max_tc) {
b451c8b6 11011 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
c0c050c5
MC
11012 tc, bp->max_tc);
11013 return -EINVAL;
11014 }
11015
11016 if (netdev_get_num_tc(dev) == tc)
11017 return 0;
11018
3ffb6a39
MC
11019 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
11020 sh = true;
11021
98fdbe73
MC
11022 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
11023 sh, tc, bp->tx_nr_rings_xdp);
d1e7925e
MC
11024 if (rc)
11025 return rc;
c0c050c5
MC
11026
11027 /* Needs to close the device and do hw resource re-allocations */
11028 if (netif_running(bp->dev))
11029 bnxt_close_nic(bp, true, false);
11030
11031 if (tc) {
11032 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
11033 netdev_set_num_tc(dev, tc);
11034 } else {
11035 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11036 netdev_reset_tc(dev);
11037 }
87e9b377 11038 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
3ffb6a39
MC
11039 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
11040 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5
MC
11041
11042 if (netif_running(bp->dev))
11043 return bnxt_open_nic(bp, true, false);
11044
11045 return 0;
11046}
11047
9e0fd15d
JP
11048static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
11049 void *cb_priv)
c5e3deb8 11050{
9e0fd15d 11051 struct bnxt *bp = cb_priv;
de4784ca 11052
312324f1
JK
11053 if (!bnxt_tc_flower_enabled(bp) ||
11054 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
38cf0426 11055 return -EOPNOTSUPP;
c5e3deb8 11056
9e0fd15d
JP
11057 switch (type) {
11058 case TC_SETUP_CLSFLOWER:
11059 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
11060 default:
11061 return -EOPNOTSUPP;
11062 }
11063}
11064
627c89d0 11065LIST_HEAD(bnxt_block_cb_list);
955bcb6e 11066
2ae7408f
SP
11067static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
11068 void *type_data)
11069{
4e95bc26
PNA
11070 struct bnxt *bp = netdev_priv(dev);
11071
2ae7408f 11072 switch (type) {
9e0fd15d 11073 case TC_SETUP_BLOCK:
955bcb6e
PNA
11074 return flow_block_cb_setup_simple(type_data,
11075 &bnxt_block_cb_list,
4e95bc26
PNA
11076 bnxt_setup_tc_block_cb,
11077 bp, bp, true);
575ed7d3 11078 case TC_SETUP_QDISC_MQPRIO: {
2ae7408f
SP
11079 struct tc_mqprio_qopt *mqprio = type_data;
11080
11081 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
56f36acd 11082
2ae7408f
SP
11083 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
11084 }
11085 default:
11086 return -EOPNOTSUPP;
11087 }
c5e3deb8
MC
11088}
11089
c0c050c5
MC
11090#ifdef CONFIG_RFS_ACCEL
11091static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
11092 struct bnxt_ntuple_filter *f2)
11093{
11094 struct flow_keys *keys1 = &f1->fkeys;
11095 struct flow_keys *keys2 = &f2->fkeys;
11096
6fc7caa8
MC
11097 if (keys1->basic.n_proto != keys2->basic.n_proto ||
11098 keys1->basic.ip_proto != keys2->basic.ip_proto)
11099 return false;
11100
11101 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
11102 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
11103 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
11104 return false;
11105 } else {
11106 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
11107 sizeof(keys1->addrs.v6addrs.src)) ||
11108 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
11109 sizeof(keys1->addrs.v6addrs.dst)))
11110 return false;
11111 }
11112
11113 if (keys1->ports.ports == keys2->ports.ports &&
61aad724 11114 keys1->control.flags == keys2->control.flags &&
a54c4d74
MC
11115 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
11116 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
c0c050c5
MC
11117 return true;
11118
11119 return false;
11120}
11121
11122static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
11123 u16 rxq_index, u32 flow_id)
11124{
11125 struct bnxt *bp = netdev_priv(dev);
11126 struct bnxt_ntuple_filter *fltr, *new_fltr;
11127 struct flow_keys *fkeys;
11128 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
a54c4d74 11129 int rc = 0, idx, bit_id, l2_idx = 0;
c0c050c5 11130 struct hlist_head *head;
f47d0e19 11131 u32 flags;
c0c050c5 11132
a54c4d74
MC
11133 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
11134 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11135 int off = 0, j;
11136
11137 netif_addr_lock_bh(dev);
11138 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
11139 if (ether_addr_equal(eth->h_dest,
11140 vnic->uc_list + off)) {
11141 l2_idx = j + 1;
11142 break;
11143 }
11144 }
11145 netif_addr_unlock_bh(dev);
11146 if (!l2_idx)
11147 return -EINVAL;
11148 }
c0c050c5
MC
11149 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
11150 if (!new_fltr)
11151 return -ENOMEM;
11152
11153 fkeys = &new_fltr->fkeys;
11154 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
11155 rc = -EPROTONOSUPPORT;
11156 goto err_free;
11157 }
11158
dda0e746
MC
11159 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
11160 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
c0c050c5
MC
11161 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
11162 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
11163 rc = -EPROTONOSUPPORT;
11164 goto err_free;
11165 }
dda0e746
MC
11166 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
11167 bp->hwrm_spec_code < 0x10601) {
11168 rc = -EPROTONOSUPPORT;
11169 goto err_free;
11170 }
f47d0e19
MC
11171 flags = fkeys->control.flags;
11172 if (((flags & FLOW_DIS_ENCAPSULATION) &&
11173 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
61aad724
MC
11174 rc = -EPROTONOSUPPORT;
11175 goto err_free;
11176 }
c0c050c5 11177
a54c4d74 11178 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
c0c050c5
MC
11179 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
11180
11181 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
11182 head = &bp->ntp_fltr_hash_tbl[idx];
11183 rcu_read_lock();
11184 hlist_for_each_entry_rcu(fltr, head, hash) {
11185 if (bnxt_fltr_match(fltr, new_fltr)) {
11186 rcu_read_unlock();
11187 rc = 0;
11188 goto err_free;
11189 }
11190 }
11191 rcu_read_unlock();
11192
11193 spin_lock_bh(&bp->ntp_fltr_lock);
84e86b98
MC
11194 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
11195 BNXT_NTP_FLTR_MAX_FLTR, 0);
11196 if (bit_id < 0) {
c0c050c5
MC
11197 spin_unlock_bh(&bp->ntp_fltr_lock);
11198 rc = -ENOMEM;
11199 goto err_free;
11200 }
11201
84e86b98 11202 new_fltr->sw_id = (u16)bit_id;
c0c050c5 11203 new_fltr->flow_id = flow_id;
a54c4d74 11204 new_fltr->l2_fltr_idx = l2_idx;
c0c050c5
MC
11205 new_fltr->rxq = rxq_index;
11206 hlist_add_head_rcu(&new_fltr->hash, head);
11207 bp->ntp_fltr_count++;
11208 spin_unlock_bh(&bp->ntp_fltr_lock);
11209
11210 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
c213eae8 11211 bnxt_queue_sp_work(bp);
c0c050c5
MC
11212
11213 return new_fltr->sw_id;
11214
11215err_free:
11216 kfree(new_fltr);
11217 return rc;
11218}
11219
11220static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11221{
11222 int i;
11223
11224 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
11225 struct hlist_head *head;
11226 struct hlist_node *tmp;
11227 struct bnxt_ntuple_filter *fltr;
11228 int rc;
11229
11230 head = &bp->ntp_fltr_hash_tbl[i];
11231 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
11232 bool del = false;
11233
11234 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
11235 if (rps_may_expire_flow(bp->dev, fltr->rxq,
11236 fltr->flow_id,
11237 fltr->sw_id)) {
11238 bnxt_hwrm_cfa_ntuple_filter_free(bp,
11239 fltr);
11240 del = true;
11241 }
11242 } else {
11243 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
11244 fltr);
11245 if (rc)
11246 del = true;
11247 else
11248 set_bit(BNXT_FLTR_VALID, &fltr->state);
11249 }
11250
11251 if (del) {
11252 spin_lock_bh(&bp->ntp_fltr_lock);
11253 hlist_del_rcu(&fltr->hash);
11254 bp->ntp_fltr_count--;
11255 spin_unlock_bh(&bp->ntp_fltr_lock);
11256 synchronize_rcu();
11257 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
11258 kfree(fltr);
11259 }
11260 }
11261 }
19241368 11262 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
9a005c38 11263 netdev_info(bp->dev, "Receive PF driver unload event!\n");
c0c050c5
MC
11264}
11265
11266#else
11267
11268static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11269{
11270}
11271
11272#endif /* CONFIG_RFS_ACCEL */
11273
ad51b8e9
AD
11274static void bnxt_udp_tunnel_add(struct net_device *dev,
11275 struct udp_tunnel_info *ti)
c0c050c5
MC
11276{
11277 struct bnxt *bp = netdev_priv(dev);
11278
ad51b8e9 11279 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
c0c050c5
MC
11280 return;
11281
ad51b8e9 11282 if (!netif_running(dev))
c0c050c5
MC
11283 return;
11284
ad51b8e9
AD
11285 switch (ti->type) {
11286 case UDP_TUNNEL_TYPE_VXLAN:
11287 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
11288 return;
c0c050c5 11289
ad51b8e9
AD
11290 bp->vxlan_port_cnt++;
11291 if (bp->vxlan_port_cnt == 1) {
11292 bp->vxlan_port = ti->port;
11293 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
c213eae8 11294 bnxt_queue_sp_work(bp);
ad51b8e9
AD
11295 }
11296 break;
7cdd5fc3
AD
11297 case UDP_TUNNEL_TYPE_GENEVE:
11298 if (bp->nge_port_cnt && bp->nge_port != ti->port)
11299 return;
11300
11301 bp->nge_port_cnt++;
11302 if (bp->nge_port_cnt == 1) {
11303 bp->nge_port = ti->port;
11304 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
11305 }
11306 break;
ad51b8e9
AD
11307 default:
11308 return;
c0c050c5 11309 }
ad51b8e9 11310
c213eae8 11311 bnxt_queue_sp_work(bp);
c0c050c5
MC
11312}
11313
ad51b8e9
AD
11314static void bnxt_udp_tunnel_del(struct net_device *dev,
11315 struct udp_tunnel_info *ti)
c0c050c5
MC
11316{
11317 struct bnxt *bp = netdev_priv(dev);
11318
ad51b8e9 11319 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
c0c050c5
MC
11320 return;
11321
ad51b8e9 11322 if (!netif_running(dev))
c0c050c5
MC
11323 return;
11324
ad51b8e9
AD
11325 switch (ti->type) {
11326 case UDP_TUNNEL_TYPE_VXLAN:
11327 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
11328 return;
c0c050c5
MC
11329 bp->vxlan_port_cnt--;
11330
ad51b8e9
AD
11331 if (bp->vxlan_port_cnt != 0)
11332 return;
11333
11334 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
11335 break;
7cdd5fc3
AD
11336 case UDP_TUNNEL_TYPE_GENEVE:
11337 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
11338 return;
11339 bp->nge_port_cnt--;
11340
11341 if (bp->nge_port_cnt != 0)
11342 return;
11343
11344 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
11345 break;
ad51b8e9
AD
11346 default:
11347 return;
c0c050c5 11348 }
ad51b8e9 11349
c213eae8 11350 bnxt_queue_sp_work(bp);
c0c050c5
MC
11351}
11352
39d8ba2e
MC
11353static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11354 struct net_device *dev, u32 filter_mask,
11355 int nlflags)
11356{
11357 struct bnxt *bp = netdev_priv(dev);
11358
11359 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
11360 nlflags, filter_mask, NULL);
11361}
11362
11363static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
2fd527b7 11364 u16 flags, struct netlink_ext_ack *extack)
39d8ba2e
MC
11365{
11366 struct bnxt *bp = netdev_priv(dev);
11367 struct nlattr *attr, *br_spec;
11368 int rem, rc = 0;
11369
11370 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
11371 return -EOPNOTSUPP;
11372
11373 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11374 if (!br_spec)
11375 return -EINVAL;
11376
11377 nla_for_each_nested(attr, br_spec, rem) {
11378 u16 mode;
11379
11380 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11381 continue;
11382
11383 if (nla_len(attr) < sizeof(mode))
11384 return -EINVAL;
11385
11386 mode = nla_get_u16(attr);
11387 if (mode == bp->br_mode)
11388 break;
11389
11390 rc = bnxt_hwrm_set_br_mode(bp, mode);
11391 if (!rc)
11392 bp->br_mode = mode;
11393 break;
11394 }
11395 return rc;
11396}
11397
52d5254a
FF
11398int bnxt_get_port_parent_id(struct net_device *dev,
11399 struct netdev_phys_item_id *ppid)
c124a62f 11400{
52d5254a
FF
11401 struct bnxt *bp = netdev_priv(dev);
11402
c124a62f
SP
11403 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
11404 return -EOPNOTSUPP;
11405
11406 /* The PF and it's VF-reps only support the switchdev framework */
d061b241 11407 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
c124a62f
SP
11408 return -EOPNOTSUPP;
11409
b014232f
VV
11410 ppid->id_len = sizeof(bp->dsn);
11411 memcpy(ppid->id, bp->dsn, ppid->id_len);
c124a62f 11412
52d5254a 11413 return 0;
c124a62f
SP
11414}
11415
c9c49a65
JP
11416static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
11417{
11418 struct bnxt *bp = netdev_priv(dev);
11419
11420 return &bp->dl_port;
11421}
11422
c0c050c5
MC
11423static const struct net_device_ops bnxt_netdev_ops = {
11424 .ndo_open = bnxt_open,
11425 .ndo_start_xmit = bnxt_start_xmit,
11426 .ndo_stop = bnxt_close,
11427 .ndo_get_stats64 = bnxt_get_stats64,
11428 .ndo_set_rx_mode = bnxt_set_rx_mode,
11429 .ndo_do_ioctl = bnxt_ioctl,
11430 .ndo_validate_addr = eth_validate_addr,
11431 .ndo_set_mac_address = bnxt_change_mac_addr,
11432 .ndo_change_mtu = bnxt_change_mtu,
11433 .ndo_fix_features = bnxt_fix_features,
11434 .ndo_set_features = bnxt_set_features,
11435 .ndo_tx_timeout = bnxt_tx_timeout,
11436#ifdef CONFIG_BNXT_SRIOV
11437 .ndo_get_vf_config = bnxt_get_vf_config,
11438 .ndo_set_vf_mac = bnxt_set_vf_mac,
11439 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
11440 .ndo_set_vf_rate = bnxt_set_vf_bw,
11441 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
11442 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
746df139 11443 .ndo_set_vf_trust = bnxt_set_vf_trust,
c0c050c5
MC
11444#endif
11445 .ndo_setup_tc = bnxt_setup_tc,
11446#ifdef CONFIG_RFS_ACCEL
11447 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
11448#endif
ad51b8e9
AD
11449 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
11450 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
f4e63525 11451 .ndo_bpf = bnxt_xdp,
f18c2b77 11452 .ndo_xdp_xmit = bnxt_xdp_xmit,
39d8ba2e
MC
11453 .ndo_bridge_getlink = bnxt_bridge_getlink,
11454 .ndo_bridge_setlink = bnxt_bridge_setlink,
c9c49a65 11455 .ndo_get_devlink_port = bnxt_get_devlink_port,
c0c050c5
MC
11456};
11457
11458static void bnxt_remove_one(struct pci_dev *pdev)
11459{
11460 struct net_device *dev = pci_get_drvdata(pdev);
11461 struct bnxt *bp = netdev_priv(dev);
11462
7e334fc8 11463 if (BNXT_PF(bp))
c0c050c5
MC
11464 bnxt_sriov_disable(bp);
11465
7e334fc8 11466 bnxt_dl_fw_reporters_destroy(bp, true);
6316ea6d 11467 pci_disable_pcie_error_reporting(pdev);
c0c050c5 11468 unregister_netdev(dev);
cda2cab0 11469 bnxt_dl_unregister(bp);
2ae7408f 11470 bnxt_shutdown_tc(bp);
c213eae8 11471 bnxt_cancel_sp_work(bp);
c0c050c5
MC
11472 bp->sp_event = 0;
11473
7809592d 11474 bnxt_clear_int_mode(bp);
be58a0da 11475 bnxt_hwrm_func_drv_unrgtr(bp);
c0c050c5 11476 bnxt_free_hwrm_resources(bp);
e605db80 11477 bnxt_free_hwrm_short_cmd_req(bp);
eb513658 11478 bnxt_ethtool_free(bp);
7df4ae9f 11479 bnxt_dcb_free(bp);
a588e458
MC
11480 kfree(bp->edev);
11481 bp->edev = NULL;
8280b38e
VV
11482 kfree(bp->fw_health);
11483 bp->fw_health = NULL;
c20dc142 11484 bnxt_cleanup_pci(bp);
98f04cf0
MC
11485 bnxt_free_ctx_mem(bp);
11486 kfree(bp->ctx);
11487 bp->ctx = NULL;
fd3ab1c7 11488 bnxt_free_port_stats(bp);
c0c050c5 11489 free_netdev(dev);
c0c050c5
MC
11490}
11491
ba642ab7 11492static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
c0c050c5
MC
11493{
11494 int rc = 0;
11495 struct bnxt_link_info *link_info = &bp->link_info;
c0c050c5 11496
170ce013
MC
11497 rc = bnxt_hwrm_phy_qcaps(bp);
11498 if (rc) {
11499 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
11500 rc);
11501 return rc;
11502 }
43a5107d
MC
11503 if (!fw_dflt)
11504 return 0;
11505
c0c050c5
MC
11506 rc = bnxt_update_link(bp, false);
11507 if (rc) {
11508 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
11509 rc);
11510 return rc;
11511 }
11512
93ed8117
MC
11513 /* Older firmware does not have supported_auto_speeds, so assume
11514 * that all supported speeds can be autonegotiated.
11515 */
11516 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
11517 link_info->support_auto_speeds = link_info->support_speeds;
11518
8119e49b 11519 bnxt_init_ethtool_link_settings(bp);
ba642ab7 11520 return 0;
c0c050c5
MC
11521}
11522
11523static int bnxt_get_max_irq(struct pci_dev *pdev)
11524{
11525 u16 ctrl;
11526
11527 if (!pdev->msix_cap)
11528 return 1;
11529
11530 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
11531 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
11532}
11533
6e6c5a57
MC
11534static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11535 int *max_cp)
c0c050c5 11536{
6a4f2947 11537 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
e30fbc33 11538 int max_ring_grps = 0, max_irq;
c0c050c5 11539
6a4f2947
MC
11540 *max_tx = hw_resc->max_tx_rings;
11541 *max_rx = hw_resc->max_rx_rings;
e30fbc33
MC
11542 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
11543 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
11544 bnxt_get_ulp_msix_num(bp),
c027c6b4 11545 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
e30fbc33
MC
11546 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11547 *max_cp = min_t(int, *max_cp, max_irq);
6a4f2947 11548 max_ring_grps = hw_resc->max_hw_ring_grps;
76595193
PS
11549 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
11550 *max_cp -= 1;
11551 *max_rx -= 2;
11552 }
c0c050c5
MC
11553 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11554 *max_rx >>= 1;
e30fbc33
MC
11555 if (bp->flags & BNXT_FLAG_CHIP_P5) {
11556 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
11557 /* On P5 chips, max_cp output param should be available NQs */
11558 *max_cp = max_irq;
11559 }
b72d4a68 11560 *max_rx = min_t(int, *max_rx, max_ring_grps);
6e6c5a57
MC
11561}
11562
11563int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
11564{
11565 int rx, tx, cp;
11566
11567 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
78f058a4
MC
11568 *max_rx = rx;
11569 *max_tx = tx;
6e6c5a57
MC
11570 if (!rx || !tx || !cp)
11571 return -ENOMEM;
11572
6e6c5a57
MC
11573 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
11574}
11575
e4060d30
MC
11576static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11577 bool shared)
11578{
11579 int rc;
11580
11581 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
bdbd1eb5
MC
11582 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
11583 /* Not enough rings, try disabling agg rings. */
11584 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
11585 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
07f4fde5
MC
11586 if (rc) {
11587 /* set BNXT_FLAG_AGG_RINGS back for consistency */
11588 bp->flags |= BNXT_FLAG_AGG_RINGS;
bdbd1eb5 11589 return rc;
07f4fde5 11590 }
bdbd1eb5 11591 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
1054aee8
MC
11592 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11593 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bdbd1eb5
MC
11594 bnxt_set_ring_params(bp);
11595 }
e4060d30
MC
11596
11597 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
11598 int max_cp, max_stat, max_irq;
11599
11600 /* Reserve minimum resources for RoCE */
11601 max_cp = bnxt_get_max_func_cp_rings(bp);
11602 max_stat = bnxt_get_max_func_stat_ctxs(bp);
11603 max_irq = bnxt_get_max_func_irqs(bp);
11604 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
11605 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
11606 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
11607 return 0;
11608
11609 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
11610 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
11611 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
11612 max_cp = min_t(int, max_cp, max_irq);
11613 max_cp = min_t(int, max_cp, max_stat);
11614 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
11615 if (rc)
11616 rc = 0;
11617 }
11618 return rc;
11619}
11620
58ea801a
MC
11621/* In initial default shared ring setting, each shared ring must have a
11622 * RX/TX ring pair.
11623 */
11624static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
11625{
11626 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
11627 bp->rx_nr_rings = bp->cp_nr_rings;
11628 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
11629 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11630}
11631
702c221c 11632static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
6e6c5a57
MC
11633{
11634 int dflt_rings, max_rx_rings, max_tx_rings, rc;
6e6c5a57 11635
2773dfb2
MC
11636 if (!bnxt_can_reserve_rings(bp))
11637 return 0;
11638
6e6c5a57
MC
11639 if (sh)
11640 bp->flags |= BNXT_FLAG_SHARED_RINGS;
d629522e 11641 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
1d3ef13d
MC
11642 /* Reduce default rings on multi-port cards so that total default
11643 * rings do not exceed CPU count.
11644 */
11645 if (bp->port_count > 1) {
11646 int max_rings =
11647 max_t(int, num_online_cpus() / bp->port_count, 1);
11648
11649 dflt_rings = min_t(int, dflt_rings, max_rings);
11650 }
e4060d30 11651 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6e6c5a57
MC
11652 if (rc)
11653 return rc;
11654 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
11655 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
58ea801a
MC
11656 if (sh)
11657 bnxt_trim_dflt_sh_rings(bp);
11658 else
11659 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
11660 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
391be5c2 11661
674f50a5 11662 rc = __bnxt_reserve_rings(bp);
391be5c2
MC
11663 if (rc)
11664 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
58ea801a
MC
11665 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11666 if (sh)
11667 bnxt_trim_dflt_sh_rings(bp);
391be5c2 11668
674f50a5
MC
11669 /* Rings may have been trimmed, re-reserve the trimmed rings. */
11670 if (bnxt_need_reserve_rings(bp)) {
11671 rc = __bnxt_reserve_rings(bp);
11672 if (rc)
11673 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
11674 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11675 }
76595193
PS
11676 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11677 bp->rx_nr_rings++;
11678 bp->cp_nr_rings++;
11679 }
6e6c5a57 11680 return rc;
c0c050c5
MC
11681}
11682
47558acd
MC
11683static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
11684{
11685 int rc;
11686
11687 if (bp->tx_nr_rings)
11688 return 0;
11689
6b95c3e9
MC
11690 bnxt_ulp_irq_stop(bp);
11691 bnxt_clear_int_mode(bp);
47558acd
MC
11692 rc = bnxt_set_dflt_rings(bp, true);
11693 if (rc) {
11694 netdev_err(bp->dev, "Not enough rings available.\n");
6b95c3e9 11695 goto init_dflt_ring_err;
47558acd
MC
11696 }
11697 rc = bnxt_init_int_mode(bp);
11698 if (rc)
6b95c3e9
MC
11699 goto init_dflt_ring_err;
11700
47558acd
MC
11701 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11702 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
11703 bp->flags |= BNXT_FLAG_RFS;
11704 bp->dev->features |= NETIF_F_NTUPLE;
11705 }
6b95c3e9
MC
11706init_dflt_ring_err:
11707 bnxt_ulp_irq_restart(bp, rc);
11708 return rc;
47558acd
MC
11709}
11710
80fcaf46 11711int bnxt_restore_pf_fw_resources(struct bnxt *bp)
7b08f661 11712{
80fcaf46
MC
11713 int rc;
11714
7b08f661
MC
11715 ASSERT_RTNL();
11716 bnxt_hwrm_func_qcaps(bp);
1a037782
VD
11717
11718 if (netif_running(bp->dev))
11719 __bnxt_close_nic(bp, true, false);
11720
ec86f14e 11721 bnxt_ulp_irq_stop(bp);
80fcaf46
MC
11722 bnxt_clear_int_mode(bp);
11723 rc = bnxt_init_int_mode(bp);
ec86f14e 11724 bnxt_ulp_irq_restart(bp, rc);
1a037782
VD
11725
11726 if (netif_running(bp->dev)) {
11727 if (rc)
11728 dev_close(bp->dev);
11729 else
11730 rc = bnxt_open_nic(bp, true, false);
11731 }
11732
80fcaf46 11733 return rc;
7b08f661
MC
11734}
11735
a22a6ac2
MC
11736static int bnxt_init_mac_addr(struct bnxt *bp)
11737{
11738 int rc = 0;
11739
11740 if (BNXT_PF(bp)) {
11741 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
11742 } else {
11743#ifdef CONFIG_BNXT_SRIOV
11744 struct bnxt_vf_info *vf = &bp->vf;
28ea334b 11745 bool strict_approval = true;
a22a6ac2
MC
11746
11747 if (is_valid_ether_addr(vf->mac_addr)) {
91cdda40 11748 /* overwrite netdev dev_addr with admin VF MAC */
a22a6ac2 11749 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
28ea334b
MC
11750 /* Older PF driver or firmware may not approve this
11751 * correctly.
11752 */
11753 strict_approval = false;
a22a6ac2
MC
11754 } else {
11755 eth_hw_addr_random(bp->dev);
a22a6ac2 11756 }
28ea334b 11757 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
a22a6ac2
MC
11758#endif
11759 }
11760 return rc;
11761}
11762
03213a99
JP
11763static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
11764{
11765 struct pci_dev *pdev = bp->pdev;
11766 int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
11767 u32 dw;
11768
11769 if (!pos) {
9a005c38 11770 netdev_info(bp->dev, "Unable do read adapter's DSN\n");
03213a99
JP
11771 return -EOPNOTSUPP;
11772 }
11773
11774 /* DSN (two dw) is at an offset of 4 from the cap pos */
11775 pos += 4;
11776 pci_read_config_dword(pdev, pos, &dw);
11777 put_unaligned_le32(dw, &dsn[0]);
11778 pci_read_config_dword(pdev, pos + 4, &dw);
11779 put_unaligned_le32(dw, &dsn[4]);
d061b241 11780 bp->flags |= BNXT_FLAG_DSN_VALID;
03213a99
JP
11781 return 0;
11782}
11783
c0c050c5
MC
11784static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
11785{
11786 static int version_printed;
11787 struct net_device *dev;
11788 struct bnxt *bp;
6e6c5a57 11789 int rc, max_irqs;
c0c050c5 11790
4e00338a 11791 if (pci_is_bridge(pdev))
fa853dda
PS
11792 return -ENODEV;
11793
c0c050c5
MC
11794 if (version_printed++ == 0)
11795 pr_info("%s", version);
11796
8743db4a
VV
11797 /* Clear any pending DMA transactions from crash kernel
11798 * while loading driver in capture kernel.
11799 */
11800 if (is_kdump_kernel()) {
11801 pci_clear_master(pdev);
11802 pcie_flr(pdev);
11803 }
11804
c0c050c5
MC
11805 max_irqs = bnxt_get_max_irq(pdev);
11806 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
11807 if (!dev)
11808 return -ENOMEM;
11809
11810 bp = netdev_priv(dev);
9c1fabdf 11811 bnxt_set_max_func_irqs(bp, max_irqs);
c0c050c5
MC
11812
11813 if (bnxt_vf_pciid(ent->driver_data))
11814 bp->flags |= BNXT_FLAG_VF;
11815
2bcfa6f6 11816 if (pdev->msix_cap)
c0c050c5 11817 bp->flags |= BNXT_FLAG_MSIX_CAP;
c0c050c5
MC
11818
11819 rc = bnxt_init_board(pdev, dev);
11820 if (rc < 0)
11821 goto init_err_free;
11822
11823 dev->netdev_ops = &bnxt_netdev_ops;
11824 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
11825 dev->ethtool_ops = &bnxt_ethtool_ops;
c0c050c5
MC
11826 pci_set_drvdata(pdev, dev);
11827
3e8060fa
PS
11828 rc = bnxt_alloc_hwrm_resources(bp);
11829 if (rc)
17086399 11830 goto init_err_pci_clean;
3e8060fa
PS
11831
11832 mutex_init(&bp->hwrm_cmd_lock);
ba642ab7 11833 mutex_init(&bp->link_lock);
7c380918
MC
11834
11835 rc = bnxt_fw_init_one_p1(bp);
3e8060fa 11836 if (rc)
17086399 11837 goto init_err_pci_clean;
3e8060fa 11838
e38287b7
MC
11839 if (BNXT_CHIP_P5(bp))
11840 bp->flags |= BNXT_FLAG_CHIP_P5;
11841
7c380918 11842 rc = bnxt_fw_init_one_p2(bp);
3c2217a6
MC
11843 if (rc)
11844 goto init_err_pci_clean;
11845
c0c050c5
MC
11846 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
11847 NETIF_F_TSO | NETIF_F_TSO6 |
11848 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7e13318d 11849 NETIF_F_GSO_IPXIP4 |
152971ee
AD
11850 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
11851 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
3e8060fa
PS
11852 NETIF_F_RXCSUM | NETIF_F_GRO;
11853
e38287b7 11854 if (BNXT_SUPPORTS_TPA(bp))
3e8060fa 11855 dev->hw_features |= NETIF_F_LRO;
c0c050c5 11856
c0c050c5
MC
11857 dev->hw_enc_features =
11858 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
11859 NETIF_F_TSO | NETIF_F_TSO6 |
11860 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
152971ee 11861 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7e13318d 11862 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
152971ee
AD
11863 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
11864 NETIF_F_GSO_GRE_CSUM;
c0c050c5
MC
11865 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
11866 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
11867 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
e38287b7 11868 if (BNXT_SUPPORTS_TPA(bp))
1054aee8 11869 dev->hw_features |= NETIF_F_GRO_HW;
c0c050c5 11870 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
1054aee8
MC
11871 if (dev->features & NETIF_F_GRO_HW)
11872 dev->features &= ~NETIF_F_LRO;
c0c050c5
MC
11873 dev->priv_flags |= IFF_UNICAST_FLT;
11874
11875#ifdef CONFIG_BNXT_SRIOV
11876 init_waitqueue_head(&bp->sriov_cfg_wait);
4ab0c6a8 11877 mutex_init(&bp->sriov_lock);
c0c050c5 11878#endif
e38287b7
MC
11879 if (BNXT_SUPPORTS_TPA(bp)) {
11880 bp->gro_func = bnxt_gro_func_5730x;
67912c36 11881 if (BNXT_CHIP_P4(bp))
e38287b7 11882 bp->gro_func = bnxt_gro_func_5731x;
67912c36
MC
11883 else if (BNXT_CHIP_P5(bp))
11884 bp->gro_func = bnxt_gro_func_5750x;
e38287b7
MC
11885 }
11886 if (!BNXT_CHIP_P4_PLUS(bp))
434c975a 11887 bp->flags |= BNXT_FLAG_DOUBLE_DB;
309369c9 11888
a588e458
MC
11889 bp->ulp_probe = bnxt_ulp_probe;
11890
a22a6ac2
MC
11891 rc = bnxt_init_mac_addr(bp);
11892 if (rc) {
11893 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
11894 rc = -EADDRNOTAVAIL;
11895 goto init_err_pci_clean;
11896 }
c0c050c5 11897
2e9217d1
VV
11898 if (BNXT_PF(bp)) {
11899 /* Read the adapter's DSN to use as the eswitch switch_id */
b014232f 11900 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
2e9217d1 11901 }
567b2abe 11902
7eb9bb3a
MC
11903 /* MTU range: 60 - FW defined max */
11904 dev->min_mtu = ETH_ZLEN;
11905 dev->max_mtu = bp->max_mtu;
11906
ba642ab7 11907 rc = bnxt_probe_phy(bp, true);
d5430d31
MC
11908 if (rc)
11909 goto init_err_pci_clean;
11910
c61fb99c 11911 bnxt_set_rx_skb_mode(bp, false);
c0c050c5
MC
11912 bnxt_set_tpa_flags(bp);
11913 bnxt_set_ring_params(bp);
702c221c 11914 rc = bnxt_set_dflt_rings(bp, true);
bdbd1eb5
MC
11915 if (rc) {
11916 netdev_err(bp->dev, "Not enough rings available.\n");
11917 rc = -ENOMEM;
17086399 11918 goto init_err_pci_clean;
bdbd1eb5 11919 }
c0c050c5 11920
ba642ab7 11921 bnxt_fw_init_one_p3(bp);
2bcfa6f6 11922
c0c050c5
MC
11923 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
11924 bp->flags |= BNXT_FLAG_STRIP_VLAN;
11925
7809592d 11926 rc = bnxt_init_int_mode(bp);
c0c050c5 11927 if (rc)
17086399 11928 goto init_err_pci_clean;
c0c050c5 11929
832aed16
MC
11930 /* No TC has been set yet and rings may have been trimmed due to
11931 * limited MSIX, so we re-initialize the TX rings per TC.
11932 */
11933 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11934
c213eae8
MC
11935 if (BNXT_PF(bp)) {
11936 if (!bnxt_pf_wq) {
11937 bnxt_pf_wq =
11938 create_singlethread_workqueue("bnxt_pf_wq");
11939 if (!bnxt_pf_wq) {
11940 dev_err(&pdev->dev, "Unable to create workqueue.\n");
11941 goto init_err_pci_clean;
11942 }
11943 }
2ae7408f 11944 bnxt_init_tc(bp);
c213eae8 11945 }
2ae7408f 11946
cda2cab0
VV
11947 bnxt_dl_register(bp);
11948
7809592d
MC
11949 rc = register_netdev(dev);
11950 if (rc)
cda2cab0 11951 goto init_err_cleanup;
7809592d 11952
cda2cab0
VV
11953 if (BNXT_PF(bp))
11954 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
7e334fc8 11955 bnxt_dl_fw_reporters_create(bp);
4ab0c6a8 11956
c0c050c5
MC
11957 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
11958 board_info[ent->driver_data].name,
11959 (long)pci_resource_start(pdev, 0), dev->dev_addr);
af125b75 11960 pcie_print_link_status(pdev);
90c4f788 11961
c0c050c5
MC
11962 return 0;
11963
cda2cab0
VV
11964init_err_cleanup:
11965 bnxt_dl_unregister(bp);
2ae7408f 11966 bnxt_shutdown_tc(bp);
7809592d
MC
11967 bnxt_clear_int_mode(bp);
11968
17086399 11969init_err_pci_clean:
bdb38602 11970 bnxt_hwrm_func_drv_unrgtr(bp);
f9099d61 11971 bnxt_free_hwrm_short_cmd_req(bp);
a2bf74f4 11972 bnxt_free_hwrm_resources(bp);
98f04cf0
MC
11973 bnxt_free_ctx_mem(bp);
11974 kfree(bp->ctx);
11975 bp->ctx = NULL;
07f83d72
MC
11976 kfree(bp->fw_health);
11977 bp->fw_health = NULL;
17086399 11978 bnxt_cleanup_pci(bp);
c0c050c5
MC
11979
11980init_err_free:
11981 free_netdev(dev);
11982 return rc;
11983}
11984
d196ece7
MC
11985static void bnxt_shutdown(struct pci_dev *pdev)
11986{
11987 struct net_device *dev = pci_get_drvdata(pdev);
11988 struct bnxt *bp;
11989
11990 if (!dev)
11991 return;
11992
11993 rtnl_lock();
11994 bp = netdev_priv(dev);
11995 if (!bp)
11996 goto shutdown_exit;
11997
11998 if (netif_running(dev))
11999 dev_close(dev);
12000
a7f3f939 12001 bnxt_ulp_shutdown(bp);
5567ae4a
VV
12002 bnxt_clear_int_mode(bp);
12003 pci_disable_device(pdev);
a7f3f939 12004
d196ece7 12005 if (system_state == SYSTEM_POWER_OFF) {
d196ece7
MC
12006 pci_wake_from_d3(pdev, bp->wol);
12007 pci_set_power_state(pdev, PCI_D3hot);
12008 }
12009
12010shutdown_exit:
12011 rtnl_unlock();
12012}
12013
f65a2044
MC
12014#ifdef CONFIG_PM_SLEEP
12015static int bnxt_suspend(struct device *device)
12016{
f521eaa9 12017 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
12018 struct bnxt *bp = netdev_priv(dev);
12019 int rc = 0;
12020
12021 rtnl_lock();
6a68749d 12022 bnxt_ulp_stop(bp);
f65a2044
MC
12023 if (netif_running(dev)) {
12024 netif_device_detach(dev);
12025 rc = bnxt_close(dev);
12026 }
12027 bnxt_hwrm_func_drv_unrgtr(bp);
ef02af8c 12028 pci_disable_device(bp->pdev);
f9b69d7f
VV
12029 bnxt_free_ctx_mem(bp);
12030 kfree(bp->ctx);
12031 bp->ctx = NULL;
f65a2044
MC
12032 rtnl_unlock();
12033 return rc;
12034}
12035
12036static int bnxt_resume(struct device *device)
12037{
f521eaa9 12038 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
12039 struct bnxt *bp = netdev_priv(dev);
12040 int rc = 0;
12041
12042 rtnl_lock();
ef02af8c
MC
12043 rc = pci_enable_device(bp->pdev);
12044 if (rc) {
12045 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
12046 rc);
12047 goto resume_exit;
12048 }
12049 pci_set_master(bp->pdev);
f92335d8 12050 if (bnxt_hwrm_ver_get(bp)) {
f65a2044
MC
12051 rc = -ENODEV;
12052 goto resume_exit;
12053 }
12054 rc = bnxt_hwrm_func_reset(bp);
12055 if (rc) {
12056 rc = -EBUSY;
12057 goto resume_exit;
12058 }
f92335d8 12059
f9b69d7f
VV
12060 if (bnxt_hwrm_queue_qportcfg(bp)) {
12061 rc = -ENODEV;
12062 goto resume_exit;
12063 }
12064
12065 if (bp->hwrm_spec_code >= 0x10803) {
12066 if (bnxt_alloc_ctx_mem(bp)) {
12067 rc = -ENODEV;
12068 goto resume_exit;
12069 }
12070 }
f92335d8
VV
12071 if (BNXT_NEW_RM(bp))
12072 bnxt_hwrm_func_resc_qcaps(bp, false);
12073
12074 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
12075 rc = -ENODEV;
12076 goto resume_exit;
12077 }
12078
f65a2044
MC
12079 bnxt_get_wol_settings(bp);
12080 if (netif_running(dev)) {
12081 rc = bnxt_open(dev);
12082 if (!rc)
12083 netif_device_attach(dev);
12084 }
12085
12086resume_exit:
6a68749d 12087 bnxt_ulp_start(bp, rc);
f65a2044
MC
12088 rtnl_unlock();
12089 return rc;
12090}
12091
12092static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
12093#define BNXT_PM_OPS (&bnxt_pm_ops)
12094
12095#else
12096
12097#define BNXT_PM_OPS NULL
12098
12099#endif /* CONFIG_PM_SLEEP */
12100
6316ea6d
SB
12101/**
12102 * bnxt_io_error_detected - called when PCI error is detected
12103 * @pdev: Pointer to PCI device
12104 * @state: The current pci connection state
12105 *
12106 * This function is called after a PCI bus error affecting
12107 * this device has been detected.
12108 */
12109static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
12110 pci_channel_state_t state)
12111{
12112 struct net_device *netdev = pci_get_drvdata(pdev);
a588e458 12113 struct bnxt *bp = netdev_priv(netdev);
6316ea6d
SB
12114
12115 netdev_info(netdev, "PCI I/O error detected\n");
12116
12117 rtnl_lock();
12118 netif_device_detach(netdev);
12119
a588e458
MC
12120 bnxt_ulp_stop(bp);
12121
6316ea6d
SB
12122 if (state == pci_channel_io_perm_failure) {
12123 rtnl_unlock();
12124 return PCI_ERS_RESULT_DISCONNECT;
12125 }
12126
12127 if (netif_running(netdev))
12128 bnxt_close(netdev);
12129
12130 pci_disable_device(pdev);
12131 rtnl_unlock();
12132
12133 /* Request a slot slot reset. */
12134 return PCI_ERS_RESULT_NEED_RESET;
12135}
12136
12137/**
12138 * bnxt_io_slot_reset - called after the pci bus has been reset.
12139 * @pdev: Pointer to PCI device
12140 *
12141 * Restart the card from scratch, as if from a cold-boot.
12142 * At this point, the card has exprienced a hard reset,
12143 * followed by fixups by BIOS, and has its config space
12144 * set up identically to what it was at cold boot.
12145 */
12146static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
12147{
12148 struct net_device *netdev = pci_get_drvdata(pdev);
12149 struct bnxt *bp = netdev_priv(netdev);
12150 int err = 0;
12151 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
12152
12153 netdev_info(bp->dev, "PCI Slot Reset\n");
12154
12155 rtnl_lock();
12156
12157 if (pci_enable_device(pdev)) {
12158 dev_err(&pdev->dev,
12159 "Cannot re-enable PCI device after reset.\n");
12160 } else {
12161 pci_set_master(pdev);
12162
aa8ed021
MC
12163 err = bnxt_hwrm_func_reset(bp);
12164 if (!err && netif_running(netdev))
6316ea6d
SB
12165 err = bnxt_open(netdev);
12166
aa46dfff 12167 if (!err)
6316ea6d 12168 result = PCI_ERS_RESULT_RECOVERED;
aa46dfff 12169 bnxt_ulp_start(bp, err);
6316ea6d
SB
12170 }
12171
12172 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
12173 dev_close(netdev);
12174
12175 rtnl_unlock();
12176
6316ea6d
SB
12177 return PCI_ERS_RESULT_RECOVERED;
12178}
12179
12180/**
12181 * bnxt_io_resume - called when traffic can start flowing again.
12182 * @pdev: Pointer to PCI device
12183 *
12184 * This callback is called when the error recovery driver tells
12185 * us that its OK to resume normal operation.
12186 */
12187static void bnxt_io_resume(struct pci_dev *pdev)
12188{
12189 struct net_device *netdev = pci_get_drvdata(pdev);
12190
12191 rtnl_lock();
12192
12193 netif_device_attach(netdev);
12194
12195 rtnl_unlock();
12196}
12197
12198static const struct pci_error_handlers bnxt_err_handler = {
12199 .error_detected = bnxt_io_error_detected,
12200 .slot_reset = bnxt_io_slot_reset,
12201 .resume = bnxt_io_resume
12202};
12203
c0c050c5
MC
12204static struct pci_driver bnxt_pci_driver = {
12205 .name = DRV_MODULE_NAME,
12206 .id_table = bnxt_pci_tbl,
12207 .probe = bnxt_init_one,
12208 .remove = bnxt_remove_one,
d196ece7 12209 .shutdown = bnxt_shutdown,
f65a2044 12210 .driver.pm = BNXT_PM_OPS,
6316ea6d 12211 .err_handler = &bnxt_err_handler,
c0c050c5
MC
12212#if defined(CONFIG_BNXT_SRIOV)
12213 .sriov_configure = bnxt_sriov_configure,
12214#endif
12215};
12216
c213eae8
MC
12217static int __init bnxt_init(void)
12218{
cabfb09d 12219 bnxt_debug_init();
c213eae8
MC
12220 return pci_register_driver(&bnxt_pci_driver);
12221}
12222
12223static void __exit bnxt_exit(void)
12224{
12225 pci_unregister_driver(&bnxt_pci_driver);
12226 if (bnxt_pf_wq)
12227 destroy_workqueue(bnxt_pf_wq);
cabfb09d 12228 bnxt_debug_exit();
c213eae8
MC
12229}
12230
12231module_init(bnxt_init);
12232module_exit(bnxt_exit);