c0c050c5
MC
1/* Broadcom NetXtreme-C/E network driver.
2 *
11f15ed3 3 * Copyright (c) 2014-2016 Broadcom Corporation
c6cc32a2 4 * Copyright (c) 2016-2019 Broadcom Limited
c0c050c5
MC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12
13#include <linux/stringify.h>
14#include <linux/kernel.h>
15#include <linux/timer.h>
16#include <linux/errno.h>
17#include <linux/ioport.h>
18#include <linux/slab.h>
19#include <linux/vmalloc.h>
20#include <linux/interrupt.h>
21#include <linux/pci.h>
22#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/skbuff.h>
25#include <linux/dma-mapping.h>
26#include <linux/bitops.h>
27#include <linux/io.h>
28#include <linux/irq.h>
29#include <linux/delay.h>
30#include <asm/byteorder.h>
31#include <asm/page.h>
32#include <linux/time.h>
33#include <linux/mii.h>
0ca12be9 34#include <linux/mdio.h>
c0c050c5
MC
35#include <linux/if.h>
36#include <linux/if_vlan.h>
32e8239c 37#include <linux/if_bridge.h>
5ac67d8b 38#include <linux/rtc.h>
c6d30e83 39#include <linux/bpf.h>
c0c050c5
MC
40#include <net/ip.h>
41#include <net/tcp.h>
42#include <net/udp.h>
43#include <net/checksum.h>
44#include <net/ip6_checksum.h>
ad51b8e9 45#include <net/udp_tunnel.h>
c0c050c5
MC
46#include <linux/workqueue.h>
47#include <linux/prefetch.h>
48#include <linux/cache.h>
49#include <linux/log2.h>
50#include <linux/aer.h>
51#include <linux/bitmap.h>
52#include <linux/cpu_rmap.h>
56f0fd80 53#include <linux/cpumask.h>
2ae7408f 54#include <net/pkt_cls.h>
cde49a42
VV
55#include <linux/hwmon.h>
56#include <linux/hwmon-sysfs.h>
322b87ca 57#include <net/page_pool.h>
c0c050c5
MC
58
59#include "bnxt_hsi.h"
60#include "bnxt.h"
a588e458 61#include "bnxt_ulp.h"
c0c050c5
MC
62#include "bnxt_sriov.h"
63#include "bnxt_ethtool.h"
7df4ae9f 64#include "bnxt_dcb.h"
c6d30e83 65#include "bnxt_xdp.h"
4ab0c6a8 66#include "bnxt_vfr.h"
2ae7408f 67#include "bnxt_tc.h"
3c467bf3 68#include "bnxt_devlink.h"
cabfb09d 69#include "bnxt_debugfs.h"
c0c050c5
MC
70
71#define BNXT_TX_TIMEOUT (5 * HZ)
72
73static const char version[] =
74 "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
75
76MODULE_LICENSE("GPL");
77MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
78MODULE_VERSION(DRV_MODULE_VERSION);
79
80#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
81#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
82#define BNXT_RX_COPY_THRESH 256
83
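/* TX packets with up to this many bytes of payload may be copied straight
 * into the doorbell "push" buffer in bnxt_start_xmit() instead of being
 * DMA-mapped, saving the NIC a descriptor/buffer fetch for small frames.
 */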
4419dbe6 84#define BNXT_TX_PUSH_THRESH 164
c0c050c5
MC
85
86enum board_idx {
fbc9a523 87 BCM57301,
c0c050c5
MC
88 BCM57302,
89 BCM57304,
1f681688 90 BCM57417_NPAR,
fa853dda 91 BCM58700,
b24eb6ae
MC
92 BCM57311,
93 BCM57312,
fbc9a523 94 BCM57402,
c0c050c5
MC
95 BCM57404,
96 BCM57406,
1f681688
MC
97 BCM57402_NPAR,
98 BCM57407,
b24eb6ae
MC
99 BCM57412,
100 BCM57414,
101 BCM57416,
102 BCM57417,
1f681688 103 BCM57412_NPAR,
5049e33b 104 BCM57314,
1f681688
MC
105 BCM57417_SFP,
106 BCM57416_SFP,
107 BCM57404_NPAR,
108 BCM57406_NPAR,
109 BCM57407_SFP,
adbc8305 110 BCM57407_NPAR,
1f681688
MC
111 BCM57414_NPAR,
112 BCM57416_NPAR,
32b40798
DK
113 BCM57452,
114 BCM57454,
92abef36 115 BCM5745x_NPAR,
1ab968d2 116 BCM57508,
c6cc32a2 117 BCM57504,
51fec80d 118 BCM57502,
49c98421
MC
119 BCM57508_NPAR,
120 BCM57504_NPAR,
121 BCM57502_NPAR,
4a58139b 122 BCM58802,
8ed693b7 123 BCM58804,
4a58139b 124 BCM58808,
adbc8305
MC
125 NETXTREME_E_VF,
126 NETXTREME_C_VF,
618784e3 127 NETXTREME_S_VF,
b16b6891 128 NETXTREME_E_P5_VF,
c0c050c5
MC
129};
130
131/* indexed by enum above */
132static const struct {
133 char *name;
134} board_info[] = {
27573a7d
SB
135 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
136 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
137 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
138 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
139 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
140 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
141 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
142 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
143 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
144 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
145 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
146 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
147 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
148 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
149 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
150 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
151 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
152 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
153 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
154 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
155 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
156 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
157 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
158 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
159 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
160 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
161 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
162 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
92abef36 163 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
1ab968d2 164 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
c6cc32a2 165 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
51fec80d 166 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
49c98421
MC
167 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
168 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
169 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
27573a7d 170 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
8ed693b7 171 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
27573a7d
SB
172 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
173 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
174 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
618784e3 175 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
b16b6891 176 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
c0c050c5
MC
177};
178
179static const struct pci_device_id bnxt_pci_tbl[] = {
92abef36
VV
180 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
181 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
4a58139b 182 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
adbc8305 183 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
fbc9a523 184 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
c0c050c5
MC
185 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
186 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
1f681688 187 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
fa853dda 188 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
b24eb6ae
MC
189 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
190 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
fbc9a523 191 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
c0c050c5
MC
192 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
193 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
1f681688
MC
194 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
195 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
b24eb6ae
MC
196 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
197 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
198 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
199 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
1f681688 200 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
5049e33b 201 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
1f681688
MC
202 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
203 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
204 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
205 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
206 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
adbc8305
MC
207 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
208 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
1f681688 209 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
adbc8305 210 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
1f681688 211 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
adbc8305 212 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
4a58139b 213 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
32b40798 214 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
1ab968d2 215 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
c6cc32a2 216 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
51fec80d 217 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
49c98421
MC
218 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
219 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
220 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
221 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
222 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
223 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
4a58139b 224 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
8ed693b7 225 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
c0c050c5 226#ifdef CONFIG_BNXT_SRIOV
c7ef35eb
DK
227 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
228 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
adbc8305
MC
229 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
230 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
231 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
232 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
233 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
234 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
51fec80d 235 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
b16b6891 236 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
618784e3 237 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
c0c050c5
MC
238#endif
239 { 0 }
240};
241
242MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
243
244static const u16 bnxt_vf_req_snif[] = {
245 HWRM_FUNC_CFG,
91cdda40 246 HWRM_FUNC_VF_CFG,
c0c050c5
MC
247 HWRM_PORT_PHY_QCFG,
248 HWRM_CFA_L2_FILTER_ALLOC,
249};
250
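/* Completion-ring async event IDs that the driver registers with the
 * firmware and handles in its async event processing path (link changes,
 * config changes, and the RESET_NOTIFY/ERROR_RECOVERY events that drive
 * firmware reset and error recovery).
 */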
25be8623 251static const u16 bnxt_async_events_arr[] = {
87c374de
MC
252 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
253 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
254 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
255 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
256 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
2151fe08 257 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
7e914027 258 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
25be8623
MC
259};
260
c213eae8
MC
261static struct workqueue_struct *bnxt_pf_wq;
262
c0c050c5
MC
263static bool bnxt_vf_pciid(enum board_idx idx)
264{
618784e3 265 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
b16b6891 266 idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
c0c050c5
MC
267}
268
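/* Doorbell helpers.  Legacy (pre-P5) chips use 32-bit completion ring
 * doorbells built from a key, a valid consumer index and an optional
 * IRQ-disable flag; P5 chips use 64-bit NQ/CQ doorbells (db_key64)
 * written with writeq() in the helpers below.
 */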
269#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
270#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
271#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
272
c0c050c5
MC
273#define BNXT_CP_DB_IRQ_DIS(db) \
274 writel(DB_CP_IRQ_DIS_FLAGS, db)
275
697197e5
MC
276#define BNXT_DB_CQ(db, idx) \
277 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
278
279#define BNXT_DB_NQ_P5(db, idx) \
280 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
281
282#define BNXT_DB_CQ_ARM(db, idx) \
283 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
284
285#define BNXT_DB_NQ_ARM_P5(db, idx) \
286 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
287
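/* Ring (and optionally re-arm) the notification/completion queue doorbell,
 * picking the P5 or legacy format based on the chip flags.
 */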
288static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
289{
290 if (bp->flags & BNXT_FLAG_CHIP_P5)
291 BNXT_DB_NQ_P5(db, idx);
292 else
293 BNXT_DB_CQ(db, idx);
294}
295
296static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
297{
298 if (bp->flags & BNXT_FLAG_CHIP_P5)
299 BNXT_DB_NQ_ARM_P5(db, idx);
300 else
301 BNXT_DB_CQ_ARM(db, idx);
302}
303
304static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
305{
306 if (bp->flags & BNXT_FLAG_CHIP_P5)
307 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
308 db->doorbell);
309 else
310 BNXT_DB_CQ(db, idx);
311}
312
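/* TX length hint lookup table, indexed by packet length in 512-byte units
 * (see "length >>= 9" in bnxt_start_xmit()).  For example, a 1400-byte
 * frame indexes entry 2 and gets TX_BD_FLAGS_LHINT_1024_TO_2047.
 */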
38413406 313const u16 bnxt_lhint_arr[] = {
c0c050c5
MC
314 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
315 TX_BD_FLAGS_LHINT_512_TO_1023,
316 TX_BD_FLAGS_LHINT_1024_TO_2047,
317 TX_BD_FLAGS_LHINT_1024_TO_2047,
318 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
319 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
320 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
321 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
322 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
323 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
324 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
325 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
326 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
327 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
328 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
329 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
330 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
331 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
332 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
333};
334
ee5c7fb3
SP
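/* Return the CFA action (hardware port ID) carried in the skb's
 * HW_PORT_MUX metadata dst, typically set by the VF representor transmit
 * path; 0 means no special action.
 */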
335static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
336{
337 struct metadata_dst *md_dst = skb_metadata_dst(skb);
338
339 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
340 return 0;
341
342 return md_dst->u.port_info.port_id;
343}
344
c0c050c5
MC
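/* Main transmit routine.  A small packet on an otherwise empty ring is
 * copied into the doorbell push buffer; everything else is DMA-mapped as
 * a long TX BD chain (first BD + extended BD + one BD per fragment) and
 * the TX doorbell is written once the BDs are visible to the device.
 */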
345static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
346{
347 struct bnxt *bp = netdev_priv(dev);
348 struct tx_bd *txbd;
349 struct tx_bd_ext *txbd1;
350 struct netdev_queue *txq;
351 int i;
352 dma_addr_t mapping;
353 unsigned int length, pad = 0;
354 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
355 u16 prod, last_frag;
356 struct pci_dev *pdev = bp->pdev;
c0c050c5
MC
357 struct bnxt_tx_ring_info *txr;
358 struct bnxt_sw_tx_bd *tx_buf;
359
360 i = skb_get_queue_mapping(skb);
361 if (unlikely(i >= bp->tx_nr_rings)) {
362 dev_kfree_skb_any(skb);
363 return NETDEV_TX_OK;
364 }
365
c0c050c5 366 txq = netdev_get_tx_queue(dev, i);
a960dec9 367 txr = &bp->tx_ring[bp->tx_ring_map[i]];
c0c050c5
MC
368 prod = txr->tx_prod;
369
370 free_size = bnxt_tx_avail(bp, txr);
371 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
372 netif_tx_stop_queue(txq);
373 return NETDEV_TX_BUSY;
374 }
375
376 length = skb->len;
377 len = skb_headlen(skb);
378 last_frag = skb_shinfo(skb)->nr_frags;
379
380 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
381
382 txbd->tx_bd_opaque = prod;
383
384 tx_buf = &txr->tx_buf_ring[prod];
385 tx_buf->skb = skb;
386 tx_buf->nr_frags = last_frag;
387
388 vlan_tag_flags = 0;
ee5c7fb3 389 cfa_action = bnxt_xmit_get_cfa_action(skb);
c0c050c5
MC
390 if (skb_vlan_tag_present(skb)) {
391 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
392 skb_vlan_tag_get(skb);
 393 /* Currently supports 802.1Q and 802.1ad VLAN offloads.
 394 * QinQ1, QinQ2, QinQ3 VLAN headers are deprecated.
395 */
396 if (skb->vlan_proto == htons(ETH_P_8021Q))
397 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
398 }
399
400 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
4419dbe6
MC
401 struct tx_push_buffer *tx_push_buf = txr->tx_push;
402 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
403 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
697197e5 404 void __iomem *db = txr->tx_db.doorbell;
4419dbe6
MC
405 void *pdata = tx_push_buf->data;
406 u64 *end;
407 int j, push_len;
c0c050c5
MC
408
409 /* Set COAL_NOW to be ready quickly for the next push */
410 tx_push->tx_bd_len_flags_type =
411 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
412 TX_BD_TYPE_LONG_TX_BD |
413 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
414 TX_BD_FLAGS_COAL_NOW |
415 TX_BD_FLAGS_PACKET_END |
416 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
417
418 if (skb->ip_summed == CHECKSUM_PARTIAL)
419 tx_push1->tx_bd_hsize_lflags =
420 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
421 else
422 tx_push1->tx_bd_hsize_lflags = 0;
423
424 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
ee5c7fb3
SP
425 tx_push1->tx_bd_cfa_action =
426 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
c0c050c5 427
fbb0fa8b
MC
428 end = pdata + length;
429 end = PTR_ALIGN(end, 8) - 1;
4419dbe6
MC
430 *end = 0;
431
c0c050c5
MC
432 skb_copy_from_linear_data(skb, pdata, len);
433 pdata += len;
434 for (j = 0; j < last_frag; j++) {
435 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
436 void *fptr;
437
438 fptr = skb_frag_address_safe(frag);
439 if (!fptr)
440 goto normal_tx;
441
442 memcpy(pdata, fptr, skb_frag_size(frag));
443 pdata += skb_frag_size(frag);
444 }
445
4419dbe6
MC
446 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
447 txbd->tx_bd_haddr = txr->data_mapping;
c0c050c5
MC
448 prod = NEXT_TX(prod);
449 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
450 memcpy(txbd, tx_push1, sizeof(*txbd));
451 prod = NEXT_TX(prod);
4419dbe6 452 tx_push->doorbell =
c0c050c5
MC
453 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
454 txr->tx_prod = prod;
455
b9a8460a 456 tx_buf->is_push = 1;
c0c050c5 457 netdev_tx_sent_queue(txq, skb->len);
b9a8460a 458 wmb(); /* Sync is_push and byte queue before pushing data */
c0c050c5 459
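/* push_len is the number of 64-bit words to copy through the doorbell
 * BAR (the push BDs plus packet data, rounded up).  Only the first 16
 * quadwords are written with 64-bit copies; e.g. if push_len were 20,
 * the remaining (20 - 16) * 2 = 8 words would go out as 32-bit writes.
 */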
4419dbe6
MC
460 push_len = (length + sizeof(*tx_push) + 7) / 8;
461 if (push_len > 16) {
697197e5
MC
462 __iowrite64_copy(db, tx_push_buf, 16);
463 __iowrite32_copy(db + 4, tx_push_buf + 1,
9d13744b 464 (push_len - 16) << 1);
4419dbe6 465 } else {
697197e5 466 __iowrite64_copy(db, tx_push_buf, push_len);
4419dbe6 467 }
c0c050c5 468
c0c050c5
MC
469 goto tx_done;
470 }
471
472normal_tx:
473 if (length < BNXT_MIN_PKT_SIZE) {
474 pad = BNXT_MIN_PKT_SIZE - length;
475 if (skb_pad(skb, pad)) {
476 /* SKB already freed. */
477 tx_buf->skb = NULL;
478 return NETDEV_TX_OK;
479 }
480 length = BNXT_MIN_PKT_SIZE;
481 }
482
483 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
484
485 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
486 dev_kfree_skb_any(skb);
487 tx_buf->skb = NULL;
488 return NETDEV_TX_OK;
489 }
490
491 dma_unmap_addr_set(tx_buf, mapping, mapping);
492 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
493 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
494
495 txbd->tx_bd_haddr = cpu_to_le64(mapping);
496
497 prod = NEXT_TX(prod);
498 txbd1 = (struct tx_bd_ext *)
499 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
500
501 txbd1->tx_bd_hsize_lflags = 0;
502 if (skb_is_gso(skb)) {
503 u32 hdr_len;
504
505 if (skb->encapsulation)
506 hdr_len = skb_inner_network_offset(skb) +
507 skb_inner_network_header_len(skb) +
508 inner_tcp_hdrlen(skb);
509 else
510 hdr_len = skb_transport_offset(skb) +
511 tcp_hdrlen(skb);
512
513 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
514 TX_BD_FLAGS_T_IPID |
515 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
516 length = skb_shinfo(skb)->gso_size;
517 txbd1->tx_bd_mss = cpu_to_le32(length);
518 length += hdr_len;
519 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
520 txbd1->tx_bd_hsize_lflags =
521 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
522 txbd1->tx_bd_mss = 0;
523 }
524
525 length >>= 9;
2b3c6885
MC
526 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
527 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
528 skb->len);
529 i = 0;
530 goto tx_dma_error;
531 }
c0c050c5
MC
532 flags |= bnxt_lhint_arr[length];
533 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
534
535 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
ee5c7fb3
SP
536 txbd1->tx_bd_cfa_action =
537 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
c0c050c5
MC
538 for (i = 0; i < last_frag; i++) {
539 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
540
541 prod = NEXT_TX(prod);
542 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
543
544 len = skb_frag_size(frag);
545 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
546 DMA_TO_DEVICE);
547
548 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
549 goto tx_dma_error;
550
551 tx_buf = &txr->tx_buf_ring[prod];
552 dma_unmap_addr_set(tx_buf, mapping, mapping);
553
554 txbd->tx_bd_haddr = cpu_to_le64(mapping);
555
556 flags = len << TX_BD_LEN_SHIFT;
557 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
558 }
559
560 flags &= ~TX_BD_LEN;
561 txbd->tx_bd_len_flags_type =
562 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
563 TX_BD_FLAGS_PACKET_END);
564
565 netdev_tx_sent_queue(txq, skb->len);
566
567 /* Sync BD data before updating doorbell */
568 wmb();
569
570 prod = NEXT_TX(prod);
571 txr->tx_prod = prod;
572
6b16f9ee 573 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
697197e5 574 bnxt_db_write(bp, &txr->tx_db, prod);
c0c050c5
MC
575
576tx_done:
577
c0c050c5 578 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
6b16f9ee 579 if (netdev_xmit_more() && !tx_buf->is_push)
697197e5 580 bnxt_db_write(bp, &txr->tx_db, prod);
4d172f21 581
c0c050c5
MC
582 netif_tx_stop_queue(txq);
583
584 /* netif_tx_stop_queue() must be done before checking
585 * tx index in bnxt_tx_avail() below, because in
586 * bnxt_tx_int(), we update tx index before checking for
587 * netif_tx_queue_stopped().
588 */
589 smp_mb();
590 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
591 netif_tx_wake_queue(txq);
592 }
593 return NETDEV_TX_OK;
594
595tx_dma_error:
596 last_frag = i;
597
598 /* start back at beginning and unmap skb */
599 prod = txr->tx_prod;
600 tx_buf = &txr->tx_buf_ring[prod];
601 tx_buf->skb = NULL;
602 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
603 skb_headlen(skb), PCI_DMA_TODEVICE);
604 prod = NEXT_TX(prod);
605
606 /* unmap remaining mapped pages */
607 for (i = 0; i < last_frag; i++) {
608 prod = NEXT_TX(prod);
609 tx_buf = &txr->tx_buf_ring[prod];
610 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
611 skb_frag_size(&skb_shinfo(skb)->frags[i]),
612 PCI_DMA_TODEVICE);
613 }
614
615 dev_kfree_skb_any(skb);
616 return NETDEV_TX_OK;
617}
618
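/* TX completion processing: unmap and free up to nr_pkts completed
 * packets (push-mode packets have no DMA mappings), report the freed
 * bytes to BQL, and wake the queue once enough descriptors are free.
 */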
619static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
620{
b6ab4b01 621 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
a960dec9 622 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
c0c050c5
MC
623 u16 cons = txr->tx_cons;
624 struct pci_dev *pdev = bp->pdev;
625 int i;
626 unsigned int tx_bytes = 0;
627
628 for (i = 0; i < nr_pkts; i++) {
629 struct bnxt_sw_tx_bd *tx_buf;
630 struct sk_buff *skb;
631 int j, last;
632
633 tx_buf = &txr->tx_buf_ring[cons];
634 cons = NEXT_TX(cons);
635 skb = tx_buf->skb;
636 tx_buf->skb = NULL;
637
638 if (tx_buf->is_push) {
639 tx_buf->is_push = 0;
640 goto next_tx_int;
641 }
642
643 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
644 skb_headlen(skb), PCI_DMA_TODEVICE);
645 last = tx_buf->nr_frags;
646
647 for (j = 0; j < last; j++) {
648 cons = NEXT_TX(cons);
649 tx_buf = &txr->tx_buf_ring[cons];
650 dma_unmap_page(
651 &pdev->dev,
652 dma_unmap_addr(tx_buf, mapping),
653 skb_frag_size(&skb_shinfo(skb)->frags[j]),
654 PCI_DMA_TODEVICE);
655 }
656
657next_tx_int:
658 cons = NEXT_TX(cons);
659
660 tx_bytes += skb->len;
661 dev_kfree_skb_any(skb);
662 }
663
664 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
665 txr->tx_cons = cons;
666
667 /* Need to make the tx_cons update visible to bnxt_start_xmit()
668 * before checking for netif_tx_queue_stopped(). Without the
669 * memory barrier, there is a small possibility that bnxt_start_xmit()
670 * will miss it and cause the queue to be stopped forever.
671 */
672 smp_mb();
673
674 if (unlikely(netif_tx_queue_stopped(txq)) &&
675 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
676 __netif_tx_lock(txq, smp_processor_id());
677 if (netif_tx_queue_stopped(txq) &&
678 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
679 txr->dev_state != BNXT_DEV_STATE_CLOSING)
680 netif_tx_wake_queue(txq);
681 __netif_tx_unlock(txq);
682 }
683}
684
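/* RX buffer allocation helpers: page mode draws full pages from the
 * per-ring page_pool, normal mode kmalloc()s an rx_buf_size buffer;
 * both map the buffer for DMA and return the address via *mapping.
 */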
c61fb99c 685static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
322b87ca 686 struct bnxt_rx_ring_info *rxr,
c61fb99c
MC
687 gfp_t gfp)
688{
689 struct device *dev = &bp->pdev->dev;
690 struct page *page;
691
322b87ca 692 page = page_pool_dev_alloc_pages(rxr->page_pool);
c61fb99c
MC
693 if (!page)
694 return NULL;
695
c519fe9a
SN
696 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
697 DMA_ATTR_WEAK_ORDERING);
c61fb99c 698 if (dma_mapping_error(dev, *mapping)) {
322b87ca 699 page_pool_recycle_direct(rxr->page_pool, page);
c61fb99c
MC
700 return NULL;
701 }
702 *mapping += bp->rx_dma_offset;
703 return page;
704}
705
c0c050c5
MC
706static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
707 gfp_t gfp)
708{
709 u8 *data;
710 struct pci_dev *pdev = bp->pdev;
711
712 data = kmalloc(bp->rx_buf_size, gfp);
713 if (!data)
714 return NULL;
715
c519fe9a
SN
716 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
717 bp->rx_buf_use_size, bp->rx_dir,
718 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
719
720 if (dma_mapping_error(&pdev->dev, *mapping)) {
721 kfree(data);
722 data = NULL;
723 }
724 return data;
725}
726
38413406
MC
727int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
728 u16 prod, gfp_t gfp)
c0c050c5
MC
729{
730 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
731 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
c0c050c5
MC
732 dma_addr_t mapping;
733
c61fb99c 734 if (BNXT_RX_PAGE_MODE(bp)) {
322b87ca
AG
735 struct page *page =
736 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
c0c050c5 737
c61fb99c
MC
738 if (!page)
739 return -ENOMEM;
740
741 rx_buf->data = page;
742 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
743 } else {
744 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
745
746 if (!data)
747 return -ENOMEM;
748
749 rx_buf->data = data;
750 rx_buf->data_ptr = data + bp->rx_offset;
751 }
11cd119d 752 rx_buf->mapping = mapping;
c0c050c5
MC
753
754 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
c0c050c5
MC
755 return 0;
756}
757
c6d30e83 758void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
c0c050c5
MC
759{
760 u16 prod = rxr->rx_prod;
761 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
762 struct rx_bd *cons_bd, *prod_bd;
763
764 prod_rx_buf = &rxr->rx_buf_ring[prod];
765 cons_rx_buf = &rxr->rx_buf_ring[cons];
766
767 prod_rx_buf->data = data;
6bb19474 768 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
c0c050c5 769
11cd119d 770 prod_rx_buf->mapping = cons_rx_buf->mapping;
c0c050c5
MC
771
772 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
773 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
774
775 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
776}
777
778static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
779{
780 u16 next, max = rxr->rx_agg_bmap_size;
781
782 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
783 if (next >= max)
784 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
785 return next;
786}
787
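/* Allocate and post one aggregation ring buffer.  When PAGE_SIZE is
 * larger than BNXT_RX_PAGE_SIZE, the current page is handed out in
 * BNXT_RX_PAGE_SIZE chunks (taking extra page references); rx_agg_bmap
 * tracks which software indices are in use since aggregation buffers
 * can complete out of order.
 */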
788static inline int bnxt_alloc_rx_page(struct bnxt *bp,
789 struct bnxt_rx_ring_info *rxr,
790 u16 prod, gfp_t gfp)
791{
792 struct rx_bd *rxbd =
793 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
794 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
795 struct pci_dev *pdev = bp->pdev;
796 struct page *page;
797 dma_addr_t mapping;
798 u16 sw_prod = rxr->rx_sw_agg_prod;
89d0a06c 799 unsigned int offset = 0;
c0c050c5 800
89d0a06c
MC
801 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
802 page = rxr->rx_page;
803 if (!page) {
804 page = alloc_page(gfp);
805 if (!page)
806 return -ENOMEM;
807 rxr->rx_page = page;
808 rxr->rx_page_offset = 0;
809 }
810 offset = rxr->rx_page_offset;
811 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
812 if (rxr->rx_page_offset == PAGE_SIZE)
813 rxr->rx_page = NULL;
814 else
815 get_page(page);
816 } else {
817 page = alloc_page(gfp);
818 if (!page)
819 return -ENOMEM;
820 }
c0c050c5 821
c519fe9a
SN
822 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
823 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
824 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
825 if (dma_mapping_error(&pdev->dev, mapping)) {
826 __free_page(page);
827 return -EIO;
828 }
829
830 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
831 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
832
833 __set_bit(sw_prod, rxr->rx_agg_bmap);
834 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
835 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
836
837 rx_agg_buf->page = page;
89d0a06c 838 rx_agg_buf->offset = offset;
c0c050c5
MC
839 rx_agg_buf->mapping = mapping;
840 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
841 rxbd->rx_bd_opaque = sw_prod;
842 return 0;
843}
844
4a228a3a
MC
845static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
846 struct bnxt_cp_ring_info *cpr,
847 u16 cp_cons, u16 curr)
848{
849 struct rx_agg_cmp *agg;
850
851 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
852 agg = (struct rx_agg_cmp *)
853 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
854 return agg;
855}
856
bfcd8d79
MC
857static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
858 struct bnxt_rx_ring_info *rxr,
859 u16 agg_id, u16 curr)
860{
861 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
862
863 return &tpa_info->agg_arr[curr];
864}
865
4a228a3a
MC
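/* A packet with aggregation buffers could not be delivered; repost the
 * aggregation pages referenced by the completion (or by the P5 TPA
 * entry) back onto the aggregation ring so no buffers are leaked.
 */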
866static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
867 u16 start, u32 agg_bufs, bool tpa)
c0c050c5 868{
e44758b7 869 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5 870 struct bnxt *bp = bnapi->bp;
b6ab4b01 871 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
872 u16 prod = rxr->rx_agg_prod;
873 u16 sw_prod = rxr->rx_sw_agg_prod;
bfcd8d79 874 bool p5_tpa = false;
c0c050c5
MC
875 u32 i;
876
bfcd8d79
MC
877 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
878 p5_tpa = true;
879
c0c050c5
MC
880 for (i = 0; i < agg_bufs; i++) {
881 u16 cons;
882 struct rx_agg_cmp *agg;
883 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
884 struct rx_bd *prod_bd;
885 struct page *page;
886
bfcd8d79
MC
887 if (p5_tpa)
888 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
889 else
890 agg = bnxt_get_agg(bp, cpr, idx, start + i);
c0c050c5
MC
891 cons = agg->rx_agg_cmp_opaque;
892 __clear_bit(cons, rxr->rx_agg_bmap);
893
894 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
895 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
896
897 __set_bit(sw_prod, rxr->rx_agg_bmap);
898 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
899 cons_rx_buf = &rxr->rx_agg_ring[cons];
900
901 /* It is possible for sw_prod to be equal to cons, so
902 * set cons_rx_buf->page to NULL first.
903 */
904 page = cons_rx_buf->page;
905 cons_rx_buf->page = NULL;
906 prod_rx_buf->page = page;
89d0a06c 907 prod_rx_buf->offset = cons_rx_buf->offset;
c0c050c5
MC
908
909 prod_rx_buf->mapping = cons_rx_buf->mapping;
910
911 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
912
913 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
914 prod_bd->rx_bd_opaque = sw_prod;
915
916 prod = NEXT_RX_AGG(prod);
917 sw_prod = NEXT_RX_AGG(sw_prod);
c0c050c5
MC
918 }
919 rxr->rx_agg_prod = prod;
920 rxr->rx_sw_agg_prod = sw_prod;
921}
922
c61fb99c
MC
923static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
924 struct bnxt_rx_ring_info *rxr,
925 u16 cons, void *data, u8 *data_ptr,
926 dma_addr_t dma_addr,
927 unsigned int offset_and_len)
928{
929 unsigned int payload = offset_and_len >> 16;
930 unsigned int len = offset_and_len & 0xffff;
d7840976 931 skb_frag_t *frag;
c61fb99c
MC
932 struct page *page = data;
933 u16 prod = rxr->rx_prod;
934 struct sk_buff *skb;
935 int off, err;
936
937 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
938 if (unlikely(err)) {
939 bnxt_reuse_rx_data(rxr, cons, data);
940 return NULL;
941 }
942 dma_addr -= bp->rx_dma_offset;
c519fe9a
SN
943 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
944 DMA_ATTR_WEAK_ORDERING);
c61fb99c
MC
945
946 if (unlikely(!payload))
c43f1255 947 payload = eth_get_headlen(bp->dev, data_ptr, len);
c61fb99c
MC
948
949 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
950 if (!skb) {
951 __free_page(page);
952 return NULL;
953 }
954
955 off = (void *)data_ptr - page_address(page);
956 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
957 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
958 payload + NET_IP_ALIGN);
959
960 frag = &skb_shinfo(skb)->frags[0];
961 skb_frag_size_sub(frag, payload);
b54c9d5b 962 skb_frag_off_add(frag, payload);
c61fb99c
MC
963 skb->data_len -= payload;
964 skb->tail += payload;
965
966 return skb;
967}
968
c0c050c5
MC
969static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
970 struct bnxt_rx_ring_info *rxr, u16 cons,
6bb19474
MC
971 void *data, u8 *data_ptr,
972 dma_addr_t dma_addr,
973 unsigned int offset_and_len)
c0c050c5 974{
6bb19474 975 u16 prod = rxr->rx_prod;
c0c050c5 976 struct sk_buff *skb;
6bb19474 977 int err;
c0c050c5
MC
978
979 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
980 if (unlikely(err)) {
981 bnxt_reuse_rx_data(rxr, cons, data);
982 return NULL;
983 }
984
985 skb = build_skb(data, 0);
c519fe9a
SN
986 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
987 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
988 if (!skb) {
989 kfree(data);
990 return NULL;
991 }
992
b3dba77c 993 skb_reserve(skb, bp->rx_offset);
6bb19474 994 skb_put(skb, offset_and_len & 0xffff);
c0c050c5
MC
995 return skb;
996}
997
e44758b7
MC
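/* Append the completion's aggregation buffers to the skb as page
 * fragments, replenishing the aggregation ring as we go.  On allocation
 * failure the skb is dropped and the remaining buffers are reposted.
 */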
998static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
999 struct bnxt_cp_ring_info *cpr,
4a228a3a
MC
1000 struct sk_buff *skb, u16 idx,
1001 u32 agg_bufs, bool tpa)
c0c050c5 1002{
e44758b7 1003 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5 1004 struct pci_dev *pdev = bp->pdev;
b6ab4b01 1005 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 1006 u16 prod = rxr->rx_agg_prod;
bfcd8d79 1007 bool p5_tpa = false;
c0c050c5
MC
1008 u32 i;
1009
bfcd8d79
MC
1010 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1011 p5_tpa = true;
1012
c0c050c5
MC
1013 for (i = 0; i < agg_bufs; i++) {
1014 u16 cons, frag_len;
1015 struct rx_agg_cmp *agg;
1016 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1017 struct page *page;
1018 dma_addr_t mapping;
1019
bfcd8d79
MC
1020 if (p5_tpa)
1021 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1022 else
1023 agg = bnxt_get_agg(bp, cpr, idx, i);
c0c050c5
MC
1024 cons = agg->rx_agg_cmp_opaque;
1025 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1026 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1027
1028 cons_rx_buf = &rxr->rx_agg_ring[cons];
89d0a06c
MC
1029 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1030 cons_rx_buf->offset, frag_len);
c0c050c5
MC
1031 __clear_bit(cons, rxr->rx_agg_bmap);
1032
1033 /* It is possible for bnxt_alloc_rx_page() to allocate
1034 * a sw_prod index that equals the cons index, so we
1035 * need to clear the cons entry now.
1036 */
11cd119d 1037 mapping = cons_rx_buf->mapping;
c0c050c5
MC
1038 page = cons_rx_buf->page;
1039 cons_rx_buf->page = NULL;
1040
1041 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1042 struct skb_shared_info *shinfo;
1043 unsigned int nr_frags;
1044
1045 shinfo = skb_shinfo(skb);
1046 nr_frags = --shinfo->nr_frags;
1047 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1048
1049 dev_kfree_skb(skb);
1050
1051 cons_rx_buf->page = page;
1052
1053 /* Update prod since possibly some pages have been
1054 * allocated already.
1055 */
1056 rxr->rx_agg_prod = prod;
4a228a3a 1057 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
c0c050c5
MC
1058 return NULL;
1059 }
1060
c519fe9a
SN
1061 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1062 PCI_DMA_FROMDEVICE,
1063 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1064
1065 skb->data_len += frag_len;
1066 skb->len += frag_len;
1067 skb->truesize += PAGE_SIZE;
1068
1069 prod = NEXT_RX_AGG(prod);
c0c050c5
MC
1070 }
1071 rxr->rx_agg_prod = prod;
1072 return skb;
1073}
1074
1075static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1076 u8 agg_bufs, u32 *raw_cons)
1077{
1078 u16 last;
1079 struct rx_agg_cmp *agg;
1080
1081 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1082 last = RING_CMP(*raw_cons);
1083 agg = (struct rx_agg_cmp *)
1084 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1085 return RX_AGG_CMP_VALID(agg, *raw_cons);
1086}
1087
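/* Copy a small received packet (up to rx_copy_thresh bytes) into a fresh
 * skb so the original DMA buffer can stay on the ring.
 */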
1088static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1089 unsigned int len,
1090 dma_addr_t mapping)
1091{
1092 struct bnxt *bp = bnapi->bp;
1093 struct pci_dev *pdev = bp->pdev;
1094 struct sk_buff *skb;
1095
1096 skb = napi_alloc_skb(&bnapi->napi, len);
1097 if (!skb)
1098 return NULL;
1099
745fc05c
MC
1100 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1101 bp->rx_dir);
c0c050c5 1102
6bb19474
MC
1103 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1104 len + NET_IP_ALIGN);
c0c050c5 1105
745fc05c
MC
1106 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1107 bp->rx_dir);
c0c050c5
MC
1108
1109 skb_put(skb, len);
1110 return skb;
1111}
1112
e44758b7 1113static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
fa7e2812
MC
1114 u32 *raw_cons, void *cmp)
1115{
fa7e2812
MC
1116 struct rx_cmp *rxcmp = cmp;
1117 u32 tmp_raw_cons = *raw_cons;
1118 u8 cmp_type, agg_bufs = 0;
1119
1120 cmp_type = RX_CMP_TYPE(rxcmp);
1121
1122 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1123 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1124 RX_CMP_AGG_BUFS) >>
1125 RX_CMP_AGG_BUFS_SHIFT;
1126 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1127 struct rx_tpa_end_cmp *tpa_end = cmp;
1128
bfcd8d79
MC
1129 if (bp->flags & BNXT_FLAG_CHIP_P5)
1130 return 0;
1131
4a228a3a 1132 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
fa7e2812
MC
1133 }
1134
1135 if (agg_bufs) {
1136 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1137 return -EBUSY;
1138 }
1139 *raw_cons = tmp_raw_cons;
1140 return 0;
1141}
1142
c213eae8
MC
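/* Queue slow-path (sp_task) work: PFs use the driver's dedicated
 * bnxt_pf_wq workqueue, VFs use the system workqueue.
 */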
1143static void bnxt_queue_sp_work(struct bnxt *bp)
1144{
1145 if (BNXT_PF(bp))
1146 queue_work(bnxt_pf_wq, &bp->sp_task);
1147 else
1148 schedule_work(&bp->sp_task);
1149}
1150
1151static void bnxt_cancel_sp_work(struct bnxt *bp)
1152{
1153 if (BNXT_PF(bp))
1154 flush_workqueue(bnxt_pf_wq);
1155 else
1156 cancel_work_sync(&bp->sp_task);
1157}
1158
fa7e2812
MC
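/* The RX ring has fallen out of sync with the hardware (unexpected
 * opaque/cons value).  Mark the NAPI as in_reset, schedule a reset from
 * sp_task, and poison rx_next_cons so stale completions are ignored.
 */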
1159static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1160{
1161 if (!rxr->bnapi->in_reset) {
1162 rxr->bnapi->in_reset = true;
1163 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 1164 bnxt_queue_sp_work(bp);
fa7e2812
MC
1165 }
1166 rxr->rx_next_cons = 0xffff;
1167}
1168
ec4d8e7c
MC
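/* On P5 chips the firmware supplies a 16-bit TPA agg_id; these helpers
 * map it onto a small bitmap-managed driver index used to look up the
 * rx_tpa[] slot for the flow.
 */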
1169static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1170{
1171 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1172 u16 idx = agg_id & MAX_TPA_P5_MASK;
1173
1174 if (test_bit(idx, map->agg_idx_bmap))
1175 idx = find_first_zero_bit(map->agg_idx_bmap,
1176 BNXT_AGG_IDX_BMAP_SIZE);
1177 __set_bit(idx, map->agg_idx_bmap);
1178 map->agg_id_tbl[agg_id] = idx;
1179 return idx;
1180}
1181
1182static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1183{
1184 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1185
1186 __clear_bit(idx, map->agg_idx_bmap);
1187}
1188
1189static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1190{
1191 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1192
1193 return map->agg_id_tbl[agg_id];
1194}
1195
c0c050c5
MC
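/* TPA_START: firmware has started aggregating a flow.  Park the current
 * RX buffer in rx_tpa[agg_id] until TPA_END, give the RX ring a
 * replacement buffer, and record the hash, metadata and GSO type that
 * the final skb will need.
 */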
1196static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1197 struct rx_tpa_start_cmp *tpa_start,
1198 struct rx_tpa_start_cmp_ext *tpa_start1)
1199{
c0c050c5 1200 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
bfcd8d79
MC
1201 struct bnxt_tpa_info *tpa_info;
1202 u16 cons, prod, agg_id;
c0c050c5
MC
1203 struct rx_bd *prod_bd;
1204 dma_addr_t mapping;
1205
ec4d8e7c 1206 if (bp->flags & BNXT_FLAG_CHIP_P5) {
bfcd8d79 1207 agg_id = TPA_START_AGG_ID_P5(tpa_start);
ec4d8e7c
MC
1208 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1209 } else {
bfcd8d79 1210 agg_id = TPA_START_AGG_ID(tpa_start);
ec4d8e7c 1211 }
c0c050c5
MC
1212 cons = tpa_start->rx_tpa_start_cmp_opaque;
1213 prod = rxr->rx_prod;
1214 cons_rx_buf = &rxr->rx_buf_ring[cons];
1215 prod_rx_buf = &rxr->rx_buf_ring[prod];
1216 tpa_info = &rxr->rx_tpa[agg_id];
1217
bfcd8d79
MC
1218 if (unlikely(cons != rxr->rx_next_cons ||
1219 TPA_START_ERROR(tpa_start))) {
1220 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1221 cons, rxr->rx_next_cons,
1222 TPA_START_ERROR_CODE(tpa_start1));
fa7e2812
MC
1223 bnxt_sched_reset(bp, rxr);
1224 return;
1225 }
ee5c7fb3
SP
1226 /* Store cfa_code in tpa_info to use in tpa_end
1227 * completion processing.
1228 */
1229 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
c0c050c5 1230 prod_rx_buf->data = tpa_info->data;
6bb19474 1231 prod_rx_buf->data_ptr = tpa_info->data_ptr;
c0c050c5
MC
1232
1233 mapping = tpa_info->mapping;
11cd119d 1234 prod_rx_buf->mapping = mapping;
c0c050c5
MC
1235
1236 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1237
1238 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1239
1240 tpa_info->data = cons_rx_buf->data;
6bb19474 1241 tpa_info->data_ptr = cons_rx_buf->data_ptr;
c0c050c5 1242 cons_rx_buf->data = NULL;
11cd119d 1243 tpa_info->mapping = cons_rx_buf->mapping;
c0c050c5
MC
1244
1245 tpa_info->len =
1246 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1247 RX_TPA_START_CMP_LEN_SHIFT;
1248 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1249 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1250
1251 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1252 tpa_info->gso_type = SKB_GSO_TCPV4;
1253 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
50f011b6 1254 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
c0c050c5
MC
1255 tpa_info->gso_type = SKB_GSO_TCPV6;
1256 tpa_info->rss_hash =
1257 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1258 } else {
1259 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1260 tpa_info->gso_type = 0;
1261 if (netif_msg_rx_err(bp))
1262 netdev_warn(bp->dev, "TPA packet without valid hash\n");
1263 }
1264 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1265 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
94758f8d 1266 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
bfcd8d79 1267 tpa_info->agg_count = 0;
c0c050c5
MC
1268
1269 rxr->rx_prod = NEXT_RX(prod);
1270 cons = NEXT_RX(cons);
376a5b86 1271 rxr->rx_next_cons = NEXT_RX(cons);
c0c050c5
MC
1272 cons_rx_buf = &rxr->rx_buf_ring[cons];
1273
1274 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1275 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1276 cons_rx_buf->data = NULL;
1277}
1278
4a228a3a 1279static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
c0c050c5
MC
1280{
1281 if (agg_bufs)
4a228a3a 1282 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
c0c050c5
MC
1283}
1284
bee5a188
MC
1285#ifdef CONFIG_INET
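/* For GRO'ed tunnel packets, look at the outer UDP header and set the
 * matching SKB_GSO_UDP_TUNNEL(_CSUM) flag so the stack knows whether an
 * outer checksum is present.
 */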
1286static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1287{
1288 struct udphdr *uh = NULL;
1289
1290 if (ip_proto == htons(ETH_P_IP)) {
1291 struct iphdr *iph = (struct iphdr *)skb->data;
1292
1293 if (iph->protocol == IPPROTO_UDP)
1294 uh = (struct udphdr *)(iph + 1);
1295 } else {
1296 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1297
1298 if (iph->nexthdr == IPPROTO_UDP)
1299 uh = (struct udphdr *)(iph + 1);
1300 }
1301 if (uh) {
1302 if (uh->check)
1303 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1304 else
1305 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1306 }
1307}
1308#endif
1309
94758f8d
MC
1310static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1311 int payload_off, int tcp_ts,
1312 struct sk_buff *skb)
1313{
1314#ifdef CONFIG_INET
1315 struct tcphdr *th;
1316 int len, nw_off;
1317 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1318 u32 hdr_info = tpa_info->hdr_info;
1319 bool loopback = false;
1320
1321 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1322 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1323 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1324
1325 /* If the packet is an internal loopback packet, the offsets will
1326 * have an extra 4 bytes.
1327 */
1328 if (inner_mac_off == 4) {
1329 loopback = true;
1330 } else if (inner_mac_off > 4) {
1331 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1332 ETH_HLEN - 2));
1333
 1334 /* We only support inner IPv4/IPv6. If we don't see the
1335 * correct protocol ID, it must be a loopback packet where
1336 * the offsets are off by 4.
1337 */
09a7636a 1338 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
94758f8d
MC
1339 loopback = true;
1340 }
1341 if (loopback) {
1342 /* internal loopback packet, subtract all offsets by 4 */
1343 inner_ip_off -= 4;
1344 inner_mac_off -= 4;
1345 outer_ip_off -= 4;
1346 }
1347
1348 nw_off = inner_ip_off - ETH_HLEN;
1349 skb_set_network_header(skb, nw_off);
1350 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1351 struct ipv6hdr *iph = ipv6_hdr(skb);
1352
1353 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1354 len = skb->len - skb_transport_offset(skb);
1355 th = tcp_hdr(skb);
1356 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1357 } else {
1358 struct iphdr *iph = ip_hdr(skb);
1359
1360 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1361 len = skb->len - skb_transport_offset(skb);
1362 th = tcp_hdr(skb);
1363 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1364 }
1365
1366 if (inner_mac_off) { /* tunnel */
94758f8d
MC
1367 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1368 ETH_HLEN - 2));
1369
bee5a188 1370 bnxt_gro_tunnel(skb, proto);
94758f8d
MC
1371 }
1372#endif
1373 return skb;
1374}
1375
67912c36
MC
1376static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1377 int payload_off, int tcp_ts,
1378 struct sk_buff *skb)
1379{
1380#ifdef CONFIG_INET
1381 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1382 u32 hdr_info = tpa_info->hdr_info;
1383 int iphdr_len, nw_off;
1384
1385 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1386 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1387 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1388
1389 nw_off = inner_ip_off - ETH_HLEN;
1390 skb_set_network_header(skb, nw_off);
1391 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1392 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1393 skb_set_transport_header(skb, nw_off + iphdr_len);
1394
1395 if (inner_mac_off) { /* tunnel */
1396 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1397 ETH_HLEN - 2));
1398
1399 bnxt_gro_tunnel(skb, proto);
1400 }
1401#endif
1402 return skb;
1403}
1404
c0c050c5
MC
1405#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1406#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1407
309369c9
MC
1408static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1409 int payload_off, int tcp_ts,
c0c050c5
MC
1410 struct sk_buff *skb)
1411{
d1611c3a 1412#ifdef CONFIG_INET
c0c050c5 1413 struct tcphdr *th;
719ca811 1414 int len, nw_off, tcp_opt_len = 0;
27e24189 1415
309369c9 1416 if (tcp_ts)
c0c050c5
MC
1417 tcp_opt_len = 12;
1418
c0c050c5
MC
1419 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1420 struct iphdr *iph;
1421
1422 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1423 ETH_HLEN;
1424 skb_set_network_header(skb, nw_off);
1425 iph = ip_hdr(skb);
1426 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1427 len = skb->len - skb_transport_offset(skb);
1428 th = tcp_hdr(skb);
1429 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1430 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1431 struct ipv6hdr *iph;
1432
1433 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1434 ETH_HLEN;
1435 skb_set_network_header(skb, nw_off);
1436 iph = ipv6_hdr(skb);
1437 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1438 len = skb->len - skb_transport_offset(skb);
1439 th = tcp_hdr(skb);
1440 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1441 } else {
1442 dev_kfree_skb_any(skb);
1443 return NULL;
1444 }
c0c050c5 1445
bee5a188
MC
1446 if (nw_off) /* tunnel */
1447 bnxt_gro_tunnel(skb, skb->protocol);
c0c050c5
MC
1448#endif
1449 return skb;
1450}
1451
309369c9
MC
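/* Finish GRO for an aggregated TPA packet: set the segment count and
 * gso_size/type from the TPA_END completion, let the chip-specific
 * bp->gro_func fix up the inner headers, then call tcp_gro_complete().
 */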
1452static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1453 struct bnxt_tpa_info *tpa_info,
1454 struct rx_tpa_end_cmp *tpa_end,
1455 struct rx_tpa_end_cmp_ext *tpa_end1,
1456 struct sk_buff *skb)
1457{
1458#ifdef CONFIG_INET
1459 int payload_off;
1460 u16 segs;
1461
1462 segs = TPA_END_TPA_SEGS(tpa_end);
1463 if (segs == 1)
1464 return skb;
1465
1466 NAPI_GRO_CB(skb)->count = segs;
1467 skb_shinfo(skb)->gso_size =
1468 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1469 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
bfcd8d79
MC
1470 if (bp->flags & BNXT_FLAG_CHIP_P5)
1471 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1472 else
1473 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
309369c9 1474 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
5910906c
MC
1475 if (likely(skb))
1476 tcp_gro_complete(skb);
309369c9
MC
1477#endif
1478 return skb;
1479}
1480
ee5c7fb3
SP
 1481/* Given the cfa_code of a received packet, determine which
1482 * netdev (vf-rep or PF) the packet is destined to.
1483 */
1484static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1485{
1486 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1487
 1488 /* if vf-rep dev is NULL, the packet must belong to the PF */
1489 return dev ? dev : bp->dev;
1490}
1491
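/* TPA_END: the aggregation is complete.  Rebuild a single skb from the
 * buffer parked at TPA_START plus any aggregation pages, restore VLAN,
 * RSS hash and checksum state, and run GRO completion.  Returns NULL if
 * the packet had to be dropped (buffers are recycled) or an ERR_PTR when
 * the completion ring does not yet hold all aggregation entries.
 */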
c0c050c5 1492static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
e44758b7 1493 struct bnxt_cp_ring_info *cpr,
c0c050c5
MC
1494 u32 *raw_cons,
1495 struct rx_tpa_end_cmp *tpa_end,
1496 struct rx_tpa_end_cmp_ext *tpa_end1,
4e5dbbda 1497 u8 *event)
c0c050c5 1498{
e44758b7 1499 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1500 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6bb19474 1501 u8 *data_ptr, agg_bufs;
c0c050c5
MC
1502 unsigned int len;
1503 struct bnxt_tpa_info *tpa_info;
1504 dma_addr_t mapping;
1505 struct sk_buff *skb;
bfcd8d79 1506 u16 idx = 0, agg_id;
6bb19474 1507 void *data;
bfcd8d79 1508 bool gro;
c0c050c5 1509
fa7e2812 1510 if (unlikely(bnapi->in_reset)) {
e44758b7 1511 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
fa7e2812
MC
1512
1513 if (rc < 0)
1514 return ERR_PTR(-EBUSY);
1515 return NULL;
1516 }
1517
bfcd8d79
MC
1518 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1519 agg_id = TPA_END_AGG_ID_P5(tpa_end);
ec4d8e7c 1520 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
bfcd8d79
MC
1521 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1522 tpa_info = &rxr->rx_tpa[agg_id];
1523 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1524 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1525 agg_bufs, tpa_info->agg_count);
1526 agg_bufs = tpa_info->agg_count;
1527 }
1528 tpa_info->agg_count = 0;
1529 *event |= BNXT_AGG_EVENT;
ec4d8e7c 1530 bnxt_free_agg_idx(rxr, agg_id);
bfcd8d79
MC
1531 idx = agg_id;
1532 gro = !!(bp->flags & BNXT_FLAG_GRO);
1533 } else {
1534 agg_id = TPA_END_AGG_ID(tpa_end);
1535 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1536 tpa_info = &rxr->rx_tpa[agg_id];
1537 idx = RING_CMP(*raw_cons);
1538 if (agg_bufs) {
1539 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1540 return ERR_PTR(-EBUSY);
1541
1542 *event |= BNXT_AGG_EVENT;
1543 idx = NEXT_CMP(idx);
1544 }
1545 gro = !!TPA_END_GRO(tpa_end);
1546 }
c0c050c5 1547 data = tpa_info->data;
6bb19474
MC
1548 data_ptr = tpa_info->data_ptr;
1549 prefetch(data_ptr);
c0c050c5
MC
1550 len = tpa_info->len;
1551 mapping = tpa_info->mapping;
1552
69c149e2 1553 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
4a228a3a 1554 bnxt_abort_tpa(cpr, idx, agg_bufs);
69c149e2
MC
1555 if (agg_bufs > MAX_SKB_FRAGS)
1556 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1557 agg_bufs, (int)MAX_SKB_FRAGS);
c0c050c5
MC
1558 return NULL;
1559 }
1560
1561 if (len <= bp->rx_copy_thresh) {
6bb19474 1562 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
c0c050c5 1563 if (!skb) {
4a228a3a 1564 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1565 return NULL;
1566 }
1567 } else {
1568 u8 *new_data;
1569 dma_addr_t new_mapping;
1570
1571 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1572 if (!new_data) {
4a228a3a 1573 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1574 return NULL;
1575 }
1576
1577 tpa_info->data = new_data;
b3dba77c 1578 tpa_info->data_ptr = new_data + bp->rx_offset;
c0c050c5
MC
1579 tpa_info->mapping = new_mapping;
1580
1581 skb = build_skb(data, 0);
c519fe9a
SN
1582 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1583 bp->rx_buf_use_size, bp->rx_dir,
1584 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1585
1586 if (!skb) {
1587 kfree(data);
4a228a3a 1588 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1589 return NULL;
1590 }
b3dba77c 1591 skb_reserve(skb, bp->rx_offset);
c0c050c5
MC
1592 skb_put(skb, len);
1593 }
1594
1595 if (agg_bufs) {
4a228a3a 1596 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
c0c050c5
MC
1597 if (!skb) {
1598 /* Page reuse already handled by bnxt_rx_pages(). */
1599 return NULL;
1600 }
1601 }
ee5c7fb3
SP
1602
1603 skb->protocol =
1604 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
c0c050c5
MC
1605
1606 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1607 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1608
8852ddb4
MC
1609 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1610 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
c0c050c5
MC
1611 u16 vlan_proto = tpa_info->metadata >>
1612 RX_CMP_FLAGS2_METADATA_TPID_SFT;
ed7bc602 1613 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
c0c050c5 1614
8852ddb4 1615 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
c0c050c5
MC
1616 }
1617
1618 skb_checksum_none_assert(skb);
1619 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1620 skb->ip_summed = CHECKSUM_UNNECESSARY;
1621 skb->csum_level =
1622 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1623 }
1624
bfcd8d79 1625 if (gro)
309369c9 1626 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
c0c050c5
MC
1627
1628 return skb;
1629}
1630
8fe88ce7
MC
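/* P5 chips deliver TPA aggregation completions separately from TPA_END;
 * stash each one in the flow's agg_arr until TPA_END is processed.
 */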
1631static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1632 struct rx_agg_cmp *rx_agg)
1633{
1634 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1635 struct bnxt_tpa_info *tpa_info;
1636
ec4d8e7c 1637 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
8fe88ce7
MC
1638 tpa_info = &rxr->rx_tpa[agg_id];
1639 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1640 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1641}
1642
ee5c7fb3
SP
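/* Hand a completed RX skb either to the VF representor it belongs to or
 * up the PF's own stack via GRO.
 */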
1643static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1644 struct sk_buff *skb)
1645{
1646 if (skb->dev != bp->dev) {
1647 /* this packet belongs to a vf-rep */
1648 bnxt_vf_rep_rx(bp, skb);
1649 return;
1650 }
1651 skb_record_rx_queue(skb, bnapi->index);
1652 napi_gro_receive(&bnapi->napi, skb);
1653}
1654
c0c050c5
MC
1655/* returns the following:
1656 * 1 - 1 packet successfully received
1657 * 0 - successful TPA_START, packet not completed yet
1658 * -EBUSY - completion ring does not have all the agg buffers yet
1659 * -ENOMEM - packet aborted due to out of memory
1660 * -EIO - packet aborted due to hw error indicated in BD
1661 */
e44758b7
MC
1662static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1663 u32 *raw_cons, u8 *event)
c0c050c5 1664{
e44758b7 1665 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1666 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1667 struct net_device *dev = bp->dev;
1668 struct rx_cmp *rxcmp;
1669 struct rx_cmp_ext *rxcmp1;
1670 u32 tmp_raw_cons = *raw_cons;
ee5c7fb3 1671 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
c0c050c5
MC
1672 struct bnxt_sw_rx_bd *rx_buf;
1673 unsigned int len;
6bb19474 1674 u8 *data_ptr, agg_bufs, cmp_type;
c0c050c5
MC
1675 dma_addr_t dma_addr;
1676 struct sk_buff *skb;
6bb19474 1677 void *data;
c0c050c5 1678 int rc = 0;
c61fb99c 1679 u32 misc;
c0c050c5
MC
1680
1681 rxcmp = (struct rx_cmp *)
1682 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1683
8fe88ce7
MC
1684 cmp_type = RX_CMP_TYPE(rxcmp);
1685
1686 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1687 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1688 goto next_rx_no_prod_no_len;
1689 }
1690
c0c050c5
MC
1691 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1692 cp_cons = RING_CMP(tmp_raw_cons);
1693 rxcmp1 = (struct rx_cmp_ext *)
1694 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1695
1696 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1697 return -EBUSY;
1698
c0c050c5
MC
1699 prod = rxr->rx_prod;
1700
1701 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1702 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1703 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1704
4e5dbbda 1705 *event |= BNXT_RX_EVENT;
e7e70fa6 1706 goto next_rx_no_prod_no_len;
c0c050c5
MC
1707
1708 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
e44758b7 1709 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
c0c050c5 1710 (struct rx_tpa_end_cmp *)rxcmp,
4e5dbbda 1711 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
c0c050c5 1712
1fac4b2f 1713 if (IS_ERR(skb))
c0c050c5
MC
1714 return -EBUSY;
1715
1716 rc = -ENOMEM;
1717 if (likely(skb)) {
ee5c7fb3 1718 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1719 rc = 1;
1720 }
4e5dbbda 1721 *event |= BNXT_RX_EVENT;
e7e70fa6 1722 goto next_rx_no_prod_no_len;
c0c050c5
MC
1723 }
1724
1725 cons = rxcmp->rx_cmp_opaque;
fa7e2812 1726 if (unlikely(cons != rxr->rx_next_cons)) {
e44758b7 1727 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
fa7e2812 1728
a1b0e4e6
MC
1729 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1730 cons, rxr->rx_next_cons);
fa7e2812
MC
1731 bnxt_sched_reset(bp, rxr);
1732 return rc1;
1733 }
a1b0e4e6
MC
1734 rx_buf = &rxr->rx_buf_ring[cons];
1735 data = rx_buf->data;
1736 data_ptr = rx_buf->data_ptr;
6bb19474 1737 prefetch(data_ptr);
c0c050c5 1738
c61fb99c
MC
1739 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1740 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
c0c050c5
MC
1741
1742 if (agg_bufs) {
1743 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1744 return -EBUSY;
1745
1746 cp_cons = NEXT_CMP(cp_cons);
4e5dbbda 1747 *event |= BNXT_AGG_EVENT;
c0c050c5 1748 }
4e5dbbda 1749 *event |= BNXT_RX_EVENT;
c0c050c5
MC
1750
1751 rx_buf->data = NULL;
1752 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
8e44e96c
MC
1753 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1754
c0c050c5
MC
1755 bnxt_reuse_rx_data(rxr, cons, data);
1756 if (agg_bufs)
4a228a3a
MC
1757 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1758 false);
c0c050c5
MC
1759
1760 rc = -EIO;
8e44e96c
MC
1761 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1762 netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
1763 bnxt_sched_reset(bp, rxr);
1764 }
0b397b17 1765 goto next_rx_no_len;
c0c050c5
MC
1766 }
1767
1768 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
11cd119d 1769 dma_addr = rx_buf->mapping;
c0c050c5 1770
c6d30e83
MC
1771 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1772 rc = 1;
1773 goto next_rx;
1774 }
1775
c0c050c5 1776 if (len <= bp->rx_copy_thresh) {
6bb19474 1777 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
c0c050c5
MC
1778 bnxt_reuse_rx_data(rxr, cons, data);
1779 if (!skb) {
296d5b54 1780 if (agg_bufs)
4a228a3a
MC
1781 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1782 agg_bufs, false);
c0c050c5
MC
1783 rc = -ENOMEM;
1784 goto next_rx;
1785 }
1786 } else {
c61fb99c
MC
1787 u32 payload;
1788
c6d30e83
MC
1789 if (rx_buf->data_ptr == data_ptr)
1790 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1791 else
1792 payload = 0;
6bb19474 1793 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
c61fb99c 1794 payload | len);
c0c050c5
MC
1795 if (!skb) {
1796 rc = -ENOMEM;
1797 goto next_rx;
1798 }
1799 }
1800
1801 if (agg_bufs) {
4a228a3a 1802 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
c0c050c5
MC
1803 if (!skb) {
1804 rc = -ENOMEM;
1805 goto next_rx;
1806 }
1807 }
1808
1809 if (RX_CMP_HASH_VALID(rxcmp)) {
1810 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1811 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1812
1813 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1814 if (hash_type != 1 && hash_type != 3)
1815 type = PKT_HASH_TYPE_L3;
1816 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1817 }
1818
ee5c7fb3
SP
1819 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1820 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
c0c050c5 1821
8852ddb4
MC
1822 if ((rxcmp1->rx_cmp_flags2 &
1823 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1824 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
c0c050c5 1825 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
ed7bc602 1826 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
c0c050c5
MC
1827 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1828
8852ddb4 1829 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
c0c050c5
MC
1830 }
1831
1832 skb_checksum_none_assert(skb);
1833 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1834 if (dev->features & NETIF_F_RXCSUM) {
1835 skb->ip_summed = CHECKSUM_UNNECESSARY;
1836 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1837 }
1838 } else {
665e350d
SB
1839 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1840 if (dev->features & NETIF_F_RXCSUM)
d1981929 1841 bnapi->cp_ring.rx_l4_csum_errors++;
665e350d 1842 }
c0c050c5
MC
1843 }
1844
ee5c7fb3 1845 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1846 rc = 1;
1847
1848next_rx:
6a8788f2
AG
1849 cpr->rx_packets += 1;
1850 cpr->rx_bytes += len;
e7e70fa6 1851
0b397b17
MC
1852next_rx_no_len:
1853 rxr->rx_prod = NEXT_RX(prod);
1854 rxr->rx_next_cons = NEXT_RX(cons);
1855
e7e70fa6 1856next_rx_no_prod_no_len:
c0c050c5
MC
1857 *raw_cons = tmp_raw_cons;
1858
1859 return rc;
1860}
1861
2270bc5d
MC
1862/* In netpoll mode, if we are using a combined completion ring, we need to
1863 * discard the rx packets and recycle the buffers.
1864 */
e44758b7
MC
1865static int bnxt_force_rx_discard(struct bnxt *bp,
1866 struct bnxt_cp_ring_info *cpr,
2270bc5d
MC
1867 u32 *raw_cons, u8 *event)
1868{
2270bc5d
MC
1869 u32 tmp_raw_cons = *raw_cons;
1870 struct rx_cmp_ext *rxcmp1;
1871 struct rx_cmp *rxcmp;
1872 u16 cp_cons;
1873 u8 cmp_type;
1874
1875 cp_cons = RING_CMP(tmp_raw_cons);
1876 rxcmp = (struct rx_cmp *)
1877 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1878
1879 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1880 cp_cons = RING_CMP(tmp_raw_cons);
1881 rxcmp1 = (struct rx_cmp_ext *)
1882 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1883
1884 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1885 return -EBUSY;
1886
1887 cmp_type = RX_CMP_TYPE(rxcmp);
1888 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1889 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1890 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1891 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1892 struct rx_tpa_end_cmp_ext *tpa_end1;
1893
1894 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1895 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1896 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1897 }
e44758b7 1898 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
2270bc5d
MC
1899}
1900
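/* Read one of the firmware health/recovery registers.  The register
 * descriptor encodes both the access method (PCI config space, a GRC
 * address remapped through BAR0, BAR0, or BAR1) and the offset; the
 * reset-in-progress register is additionally masked with
 * fw_reset_inprog_reg_mask.
 */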
7e914027
MC
1901u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1902{
1903 struct bnxt_fw_health *fw_health = bp->fw_health;
1904 u32 reg = fw_health->regs[reg_idx];
1905 u32 reg_type, reg_off, val = 0;
1906
1907 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
1908 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
1909 switch (reg_type) {
1910 case BNXT_FW_HEALTH_REG_TYPE_CFG:
1911 pci_read_config_dword(bp->pdev, reg_off, &val);
1912 break;
1913 case BNXT_FW_HEALTH_REG_TYPE_GRC:
1914 reg_off = fw_health->mapped_regs[reg_idx];
1915 /* fall through */
1916 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
1917 val = readl(bp->bar0 + reg_off);
1918 break;
1919 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
1920 val = readl(bp->bar1 + reg_off);
1921 break;
1922 }
1923 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
1924 val &= fw_health->fw_reset_inprog_reg_mask;
1925 return val;
1926}
1927
4bb13abf 1928#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
1929 ((data) & \
1930 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 1931
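/* Decode a firmware asynchronous event completion.  Most events only set
 * a bit in bp->sp_event and kick the slow-path workqueue; RESET_NOTIFY
 * additionally records the firmware's min/max reset delay hints from the
 * completion timestamps, and ERROR_RECOVERY caches the health register
 * baselines (heartbeat and reset count) used by the recovery timer.
 */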
c0c050c5
MC
1932static int bnxt_async_event_process(struct bnxt *bp,
1933 struct hwrm_async_event_cmpl *cmpl)
1934{
1935 u16 event_id = le16_to_cpu(cmpl->event_id);
1936
 1937 /* TODO CHIMP_FW: Define event IDs for link change, error etc. */
1938 switch (event_id) {
87c374de 1939 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
8cbde117
MC
1940 u32 data1 = le32_to_cpu(cmpl->event_data1);
1941 struct bnxt_link_info *link_info = &bp->link_info;
1942
1943 if (BNXT_VF(bp))
1944 goto async_event_process_exit;
a8168b6c
MC
1945
1946 /* print unsupported speed warning in forced speed mode only */
1947 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1948 (data1 & 0x20000)) {
8cbde117
MC
1949 u16 fw_speed = link_info->force_link_speed;
1950 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1951
a8168b6c
MC
1952 if (speed != SPEED_UNKNOWN)
1953 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1954 speed);
8cbde117 1955 }
286ef9d6 1956 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
8cbde117 1957 }
bc171e87 1958 /* fall through */
87c374de 1959 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
c0c050c5 1960 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
19241368 1961 break;
87c374de 1962 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
19241368 1963 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
c0c050c5 1964 break;
87c374de 1965 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
4bb13abf
MC
1966 u32 data1 = le32_to_cpu(cmpl->event_data1);
1967 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1968
1969 if (BNXT_VF(bp))
1970 break;
1971
1972 if (bp->pf.port_id != port_id)
1973 break;
1974
4bb13abf
MC
1975 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1976 break;
1977 }
87c374de 1978 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
fc0f1929
MC
1979 if (BNXT_PF(bp))
1980 goto async_event_process_exit;
1981 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1982 break;
2151fe08
MC
1983 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
1984 bp->fw_reset_timestamp = jiffies;
1985 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
1986 if (!bp->fw_reset_min_dsecs)
1987 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
1988 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
1989 if (!bp->fw_reset_max_dsecs)
1990 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
1991 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
1992 break;
7e914027
MC
1993 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
1994 struct bnxt_fw_health *fw_health = bp->fw_health;
1995 u32 data1 = le32_to_cpu(cmpl->event_data1);
1996
1997 if (!fw_health)
1998 goto async_event_process_exit;
1999
2000 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2001 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2002 if (!fw_health->enabled)
2003 break;
2004
2005 if (netif_msg_drv(bp))
2006 netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
2007 fw_health->enabled, fw_health->master,
2008 bnxt_fw_health_readl(bp,
2009 BNXT_FW_RESET_CNT_REG),
2010 bnxt_fw_health_readl(bp,
2011 BNXT_FW_HEALTH_REG));
2012 fw_health->tmr_multiplier =
2013 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2014 bp->current_interval * 10);
2015 fw_health->tmr_counter = fw_health->tmr_multiplier;
2016 fw_health->last_fw_heartbeat =
2017 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2018 fw_health->last_fw_reset_cnt =
2019 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2020 goto async_event_process_exit;
2021 }
c0c050c5 2022 default:
19241368 2023 goto async_event_process_exit;
c0c050c5 2024 }
c213eae8 2025 bnxt_queue_sp_work(bp);
19241368 2026async_event_process_exit:
a588e458 2027 bnxt_ulp_async_events(bp, cmpl);
c0c050c5
MC
2028 return 0;
2029}
2030
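/* Handle HWRM-related completions seen on the completion ring: a DONE
 * completion is matched against the pending interrupt-mode sequence id,
 * a forwarded request marks the originating VF in vf_event_bmap and
 * schedules the slow-path task, and async events are passed to
 * bnxt_async_event_process().
 */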
2031static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2032{
2033 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2034 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2035 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2036 (struct hwrm_fwd_req_cmpl *)txcmp;
2037
2038 switch (cmpl_type) {
2039 case CMPL_BASE_TYPE_HWRM_DONE:
2040 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2041 if (seq_id == bp->hwrm_intr_seq_id)
fc718bb2 2042 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
c0c050c5
MC
2043 else
2044 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2045 break;
2046
2047 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2048 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2049
2050 if ((vf_id < bp->pf.first_vf_id) ||
2051 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2052 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2053 vf_id);
2054 return -EINVAL;
2055 }
2056
2057 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2058 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
c213eae8 2059 bnxt_queue_sp_work(bp);
c0c050c5
MC
2060 break;
2061
2062 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2063 bnxt_async_event_process(bp,
2064 (struct hwrm_async_event_cmpl *)txcmp);
2065
2066 default:
2067 break;
2068 }
2069
2070 return 0;
2071}
2072
2073static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2074{
2075 struct bnxt_napi *bnapi = dev_instance;
2076 struct bnxt *bp = bnapi->bp;
2077 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2078 u32 cons = RING_CMP(cpr->cp_raw_cons);
2079
6a8788f2 2080 cpr->event_ctr++;
c0c050c5
MC
2081 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2082 napi_schedule(&bnapi->napi);
2083 return IRQ_HANDLED;
2084}
2085
2086static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2087{
2088 u32 raw_cons = cpr->cp_raw_cons;
2089 u16 cons = RING_CMP(raw_cons);
2090 struct tx_cmp *txcmp;
2091
2092 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2093
2094 return TX_CMP_VALID(txcmp, raw_cons);
2095}
2096
c0c050c5
MC
2097static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2098{
2099 struct bnxt_napi *bnapi = dev_instance;
2100 struct bnxt *bp = bnapi->bp;
2101 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2102 u32 cons = RING_CMP(cpr->cp_raw_cons);
2103 u32 int_status;
2104
2105 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2106
2107 if (!bnxt_has_work(bp, cpr)) {
11809490 2108 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
c0c050c5
MC
2109 /* return if erroneous interrupt */
2110 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2111 return IRQ_NONE;
2112 }
2113
2114 /* disable ring IRQ */
697197e5 2115 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
c0c050c5
MC
2116
2117 /* Return here if interrupt is shared and is disabled. */
2118 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2119 return IRQ_HANDLED;
2120
2121 napi_schedule(&bnapi->napi);
2122 return IRQ_HANDLED;
2123}
2124
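/* Drain up to the given budget of completion ring entries.  TX completions
 * are only counted here (the actual cleanup runs later from
 * __bnxt_poll_work_done()); RX completions go through bnxt_rx_pkt(), or
 * bnxt_force_rx_discard() when called with a zero budget (netpoll); HWRM
 * completions are handled inline.  cpr->has_more_work is set when polling
 * stops before the ring is drained.
 */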
3675b92f
MC
2125static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2126 int budget)
c0c050c5 2127{
e44758b7 2128 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5
MC
2129 u32 raw_cons = cpr->cp_raw_cons;
2130 u32 cons;
2131 int tx_pkts = 0;
2132 int rx_pkts = 0;
4e5dbbda 2133 u8 event = 0;
c0c050c5
MC
2134 struct tx_cmp *txcmp;
2135
0fcec985 2136 cpr->has_more_work = 0;
c0c050c5
MC
2137 while (1) {
2138 int rc;
2139
2140 cons = RING_CMP(raw_cons);
2141 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2142
2143 if (!TX_CMP_VALID(txcmp, raw_cons))
2144 break;
2145
67a95e20
MC
2146 /* The valid test of the entry must be done first before
2147 * reading any further.
2148 */
b67daab0 2149 dma_rmb();
3675b92f 2150 cpr->had_work_done = 1;
c0c050c5
MC
2151 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2152 tx_pkts++;
2153 /* return full budget so NAPI will complete. */
73f21c65 2154 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
c0c050c5 2155 rx_pkts = budget;
73f21c65 2156 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985
MC
2157 if (budget)
2158 cpr->has_more_work = 1;
73f21c65
MC
2159 break;
2160 }
c0c050c5 2161 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2270bc5d 2162 if (likely(budget))
e44758b7 2163 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2270bc5d 2164 else
e44758b7 2165 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2270bc5d 2166 &event);
c0c050c5
MC
2167 if (likely(rc >= 0))
2168 rx_pkts += rc;
903649e7
MC
2169 /* Increment rx_pkts when rc is -ENOMEM to count towards
2170 * the NAPI budget. Otherwise, we may potentially loop
2171 * here forever if we consistently cannot allocate
2172 * buffers.
2173 */
2edbdb31 2174 else if (rc == -ENOMEM && budget)
903649e7 2175 rx_pkts++;
c0c050c5
MC
2176 else if (rc == -EBUSY) /* partial completion */
2177 break;
c0c050c5
MC
2178 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2179 CMPL_BASE_TYPE_HWRM_DONE) ||
2180 (TX_CMP_TYPE(txcmp) ==
2181 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2182 (TX_CMP_TYPE(txcmp) ==
2183 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2184 bnxt_hwrm_handler(bp, txcmp);
2185 }
2186 raw_cons = NEXT_RAW_CMP(raw_cons);
2187
0fcec985
MC
2188 if (rx_pkts && rx_pkts == budget) {
2189 cpr->has_more_work = 1;
c0c050c5 2190 break;
0fcec985 2191 }
c0c050c5
MC
2192 }
2193
f18c2b77
AG
2194 if (event & BNXT_REDIRECT_EVENT)
2195 xdp_do_flush_map();
2196
38413406
MC
2197 if (event & BNXT_TX_EVENT) {
2198 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
38413406
MC
2199 u16 prod = txr->tx_prod;
2200
2201 /* Sync BD data before updating doorbell */
2202 wmb();
2203
697197e5 2204 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
38413406
MC
2205 }
2206
c0c050c5 2207 cpr->cp_raw_cons = raw_cons;
3675b92f
MC
2208 bnapi->tx_pkts += tx_pkts;
2209 bnapi->events |= event;
2210 return rx_pkts;
2211}
c0c050c5 2212
3675b92f
MC
2213static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2214{
2215 if (bnapi->tx_pkts) {
2216 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2217 bnapi->tx_pkts = 0;
2218 }
c0c050c5 2219
3675b92f 2220 if (bnapi->events & BNXT_RX_EVENT) {
b6ab4b01 2221 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 2222
3675b92f 2223 if (bnapi->events & BNXT_AGG_EVENT)
697197e5 2224 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 2225 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
c0c050c5 2226 }
3675b92f
MC
2227 bnapi->events = 0;
2228}
2229
2230static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2231 int budget)
2232{
2233 struct bnxt_napi *bnapi = cpr->bnapi;
2234 int rx_pkts;
2235
2236 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2237
2238 /* ACK completion ring before freeing tx ring and producing new
2239 * buffers in rx/agg rings to prevent overflowing the completion
2240 * ring.
2241 */
2242 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2243
2244 __bnxt_poll_work_done(bp, bnapi);
c0c050c5
MC
2245 return rx_pkts;
2246}
2247
10bbdaf5
PS
2248static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2249{
2250 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2251 struct bnxt *bp = bnapi->bp;
2252 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2253 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2254 struct tx_cmp *txcmp;
2255 struct rx_cmp_ext *rxcmp1;
2256 u32 cp_cons, tmp_raw_cons;
2257 u32 raw_cons = cpr->cp_raw_cons;
2258 u32 rx_pkts = 0;
4e5dbbda 2259 u8 event = 0;
10bbdaf5
PS
2260
2261 while (1) {
2262 int rc;
2263
2264 cp_cons = RING_CMP(raw_cons);
2265 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2266
2267 if (!TX_CMP_VALID(txcmp, raw_cons))
2268 break;
2269
2270 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2271 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2272 cp_cons = RING_CMP(tmp_raw_cons);
2273 rxcmp1 = (struct rx_cmp_ext *)
2274 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2275
2276 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2277 break;
2278
2279 /* force an error to recycle the buffer */
2280 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2281 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2282
e44758b7 2283 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2edbdb31 2284 if (likely(rc == -EIO) && budget)
10bbdaf5
PS
2285 rx_pkts++;
2286 else if (rc == -EBUSY) /* partial completion */
2287 break;
2288 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2289 CMPL_BASE_TYPE_HWRM_DONE)) {
2290 bnxt_hwrm_handler(bp, txcmp);
2291 } else {
2292 netdev_err(bp->dev,
2293 "Invalid completion received on special ring\n");
2294 }
2295 raw_cons = NEXT_RAW_CMP(raw_cons);
2296
2297 if (rx_pkts == budget)
2298 break;
2299 }
2300
2301 cpr->cp_raw_cons = raw_cons;
697197e5
MC
2302 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2303 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10bbdaf5 2304
434c975a 2305 if (event & BNXT_AGG_EVENT)
697197e5 2306 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10bbdaf5
PS
2307
2308 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
6ad20165 2309 napi_complete_done(napi, rx_pkts);
697197e5 2310 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
10bbdaf5
PS
2311 }
2312 return rx_pkts;
2313}
2314
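/* Standard NAPI poll handler for the legacy (non-57500) completion ring
 * layout.  Work is drained in bnxt_poll_work() chunks until the budget is
 * used up or the ring is empty; when dynamic interrupt moderation is
 * enabled (BNXT_FLAG_DIM), the per-ring packet and byte counters are fed
 * to net_dim() at the end of each poll.
 */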
c0c050c5
MC
2315static int bnxt_poll(struct napi_struct *napi, int budget)
2316{
2317 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2318 struct bnxt *bp = bnapi->bp;
2319 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2320 int work_done = 0;
2321
c0c050c5 2322 while (1) {
e44758b7 2323 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
c0c050c5 2324
73f21c65
MC
2325 if (work_done >= budget) {
2326 if (!budget)
697197e5 2327 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5 2328 break;
73f21c65 2329 }
c0c050c5
MC
2330
2331 if (!bnxt_has_work(bp, cpr)) {
e7b95691 2332 if (napi_complete_done(napi, work_done))
697197e5 2333 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5
MC
2334 break;
2335 }
2336 }
6a8788f2 2337 if (bp->flags & BNXT_FLAG_DIM) {
f06d0ca4 2338 struct dim_sample dim_sample = {};
6a8788f2 2339
8960b389
TG
2340 dim_update_sample(cpr->event_ctr,
2341 cpr->rx_packets,
2342 cpr->rx_bytes,
2343 &dim_sample);
6a8788f2
AG
2344 net_dim(&cpr->dim, dim_sample);
2345 }
c0c050c5
MC
2346 return work_done;
2347}
2348
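/* On 57500 (P5) chips the vector's main ring is a notification queue (NQ)
 * and the real RX/TX completion rings hang off cpr->cp_ring_arr[].
 * __bnxt_poll_cqs() polls both sub-rings, and __bnxt_poll_cqs_done()
 * writes the appropriate CQ doorbell (DBR_TYPE_CQ or DBR_TYPE_CQ_ARMALL)
 * for each sub-ring that did work before flushing the deferred TX/RX
 * doorbells via __bnxt_poll_work_done().
 */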
0fcec985
MC
2349static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2350{
2351 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2352 int i, work_done = 0;
2353
2354 for (i = 0; i < 2; i++) {
2355 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2356
2357 if (cpr2) {
2358 work_done += __bnxt_poll_work(bp, cpr2,
2359 budget - work_done);
2360 cpr->has_more_work |= cpr2->has_more_work;
2361 }
2362 }
2363 return work_done;
2364}
2365
2366static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2367 u64 dbr_type, bool all)
2368{
2369 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2370 int i;
2371
2372 for (i = 0; i < 2; i++) {
2373 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2374 struct bnxt_db_info *db;
2375
2376 if (cpr2 && (all || cpr2->had_work_done)) {
2377 db = &cpr2->cp_db;
2378 writeq(db->db_key64 | dbr_type |
2379 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2380 cpr2->had_work_done = 0;
2381 }
2382 }
2383 __bnxt_poll_work_done(bp, bnapi);
2384}
2385
2386static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2387{
2388 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2389 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2390 u32 raw_cons = cpr->cp_raw_cons;
2391 struct bnxt *bp = bnapi->bp;
2392 struct nqe_cn *nqcmp;
2393 int work_done = 0;
2394 u32 cons;
2395
2396 if (cpr->has_more_work) {
2397 cpr->has_more_work = 0;
2398 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2399 if (cpr->has_more_work) {
2400 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2401 return work_done;
2402 }
2403 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2404 if (napi_complete_done(napi, work_done))
2405 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2406 return work_done;
2407 }
2408 while (1) {
2409 cons = RING_CMP(raw_cons);
2410 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2411
2412 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2413 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2414 false);
2415 cpr->cp_raw_cons = raw_cons;
2416 if (napi_complete_done(napi, work_done))
2417 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2418 cpr->cp_raw_cons);
2419 return work_done;
2420 }
2421
2422 /* The valid test of the entry must be done first before
2423 * reading any further.
2424 */
2425 dma_rmb();
2426
2427 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2428 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2429 struct bnxt_cp_ring_info *cpr2;
2430
2431 cpr2 = cpr->cp_ring_arr[idx];
2432 work_done += __bnxt_poll_work(bp, cpr2,
2433 budget - work_done);
2434 cpr->has_more_work = cpr2->has_more_work;
2435 } else {
2436 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2437 }
2438 raw_cons = NEXT_RAW_CMP(raw_cons);
2439 if (cpr->has_more_work)
2440 break;
2441 }
2442 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2443 cpr->cp_raw_cons = raw_cons;
2444 return work_done;
2445}
2446
c0c050c5
MC
2447static void bnxt_free_tx_skbs(struct bnxt *bp)
2448{
2449 int i, max_idx;
2450 struct pci_dev *pdev = bp->pdev;
2451
b6ab4b01 2452 if (!bp->tx_ring)
c0c050c5
MC
2453 return;
2454
2455 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2456 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2457 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2458 int j;
2459
c0c050c5
MC
2460 for (j = 0; j < max_idx;) {
2461 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
f18c2b77 2462 struct sk_buff *skb;
c0c050c5
MC
2463 int k, last;
2464
f18c2b77
AG
2465 if (i < bp->tx_nr_rings_xdp &&
2466 tx_buf->action == XDP_REDIRECT) {
2467 dma_unmap_single(&pdev->dev,
2468 dma_unmap_addr(tx_buf, mapping),
2469 dma_unmap_len(tx_buf, len),
2470 PCI_DMA_TODEVICE);
2471 xdp_return_frame(tx_buf->xdpf);
2472 tx_buf->action = 0;
2473 tx_buf->xdpf = NULL;
2474 j++;
2475 continue;
2476 }
2477
2478 skb = tx_buf->skb;
c0c050c5
MC
2479 if (!skb) {
2480 j++;
2481 continue;
2482 }
2483
2484 tx_buf->skb = NULL;
2485
2486 if (tx_buf->is_push) {
2487 dev_kfree_skb(skb);
2488 j += 2;
2489 continue;
2490 }
2491
2492 dma_unmap_single(&pdev->dev,
2493 dma_unmap_addr(tx_buf, mapping),
2494 skb_headlen(skb),
2495 PCI_DMA_TODEVICE);
2496
2497 last = tx_buf->nr_frags;
2498 j += 2;
d612a579
MC
2499 for (k = 0; k < last; k++, j++) {
2500 int ring_idx = j & bp->tx_ring_mask;
c0c050c5
MC
2501 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2502
d612a579 2503 tx_buf = &txr->tx_buf_ring[ring_idx];
c0c050c5
MC
2504 dma_unmap_page(
2505 &pdev->dev,
2506 dma_unmap_addr(tx_buf, mapping),
2507 skb_frag_size(frag), PCI_DMA_TODEVICE);
2508 }
2509 dev_kfree_skb(skb);
2510 }
2511 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2512 }
2513}
2514
2515static void bnxt_free_rx_skbs(struct bnxt *bp)
2516{
2517 int i, max_idx, max_agg_idx;
2518 struct pci_dev *pdev = bp->pdev;
2519
b6ab4b01 2520 if (!bp->rx_ring)
c0c050c5
MC
2521 return;
2522
2523 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2524 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2525 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2526 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
ec4d8e7c 2527 struct bnxt_tpa_idx_map *map;
c0c050c5
MC
2528 int j;
2529
c0c050c5 2530 if (rxr->rx_tpa) {
79632e9b 2531 for (j = 0; j < bp->max_tpa; j++) {
c0c050c5
MC
2532 struct bnxt_tpa_info *tpa_info =
2533 &rxr->rx_tpa[j];
2534 u8 *data = tpa_info->data;
2535
2536 if (!data)
2537 continue;
2538
c519fe9a
SN
2539 dma_unmap_single_attrs(&pdev->dev,
2540 tpa_info->mapping,
2541 bp->rx_buf_use_size,
2542 bp->rx_dir,
2543 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
2544
2545 tpa_info->data = NULL;
2546
2547 kfree(data);
2548 }
2549 }
2550
2551 for (j = 0; j < max_idx; j++) {
2552 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
3ed3a83e 2553 dma_addr_t mapping = rx_buf->mapping;
6bb19474 2554 void *data = rx_buf->data;
c0c050c5
MC
2555
2556 if (!data)
2557 continue;
2558
c0c050c5
MC
2559 rx_buf->data = NULL;
2560
3ed3a83e
MC
2561 if (BNXT_RX_PAGE_MODE(bp)) {
2562 mapping -= bp->rx_dma_offset;
c519fe9a
SN
2563 dma_unmap_page_attrs(&pdev->dev, mapping,
2564 PAGE_SIZE, bp->rx_dir,
2565 DMA_ATTR_WEAK_ORDERING);
322b87ca 2566 page_pool_recycle_direct(rxr->page_pool, data);
3ed3a83e 2567 } else {
c519fe9a
SN
2568 dma_unmap_single_attrs(&pdev->dev, mapping,
2569 bp->rx_buf_use_size,
2570 bp->rx_dir,
2571 DMA_ATTR_WEAK_ORDERING);
c61fb99c 2572 kfree(data);
3ed3a83e 2573 }
c0c050c5
MC
2574 }
2575
2576 for (j = 0; j < max_agg_idx; j++) {
2577 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2578 &rxr->rx_agg_ring[j];
2579 struct page *page = rx_agg_buf->page;
2580
2581 if (!page)
2582 continue;
2583
c519fe9a
SN
2584 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2585 BNXT_RX_PAGE_SIZE,
2586 PCI_DMA_FROMDEVICE,
2587 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
2588
2589 rx_agg_buf->page = NULL;
2590 __clear_bit(j, rxr->rx_agg_bmap);
2591
2592 __free_page(page);
2593 }
89d0a06c
MC
2594 if (rxr->rx_page) {
2595 __free_page(rxr->rx_page);
2596 rxr->rx_page = NULL;
2597 }
ec4d8e7c
MC
2598 map = rxr->rx_tpa_idx_map;
2599 if (map)
2600 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
c0c050c5
MC
2601 }
2602}
2603
2604static void bnxt_free_skbs(struct bnxt *bp)
2605{
2606 bnxt_free_tx_skbs(bp);
2607 bnxt_free_rx_skbs(bp);
2608}
2609
6fe19886 2610static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5
MC
2611{
2612 struct pci_dev *pdev = bp->pdev;
2613 int i;
2614
6fe19886
MC
2615 for (i = 0; i < rmem->nr_pages; i++) {
2616 if (!rmem->pg_arr[i])
c0c050c5
MC
2617 continue;
2618
6fe19886
MC
2619 dma_free_coherent(&pdev->dev, rmem->page_size,
2620 rmem->pg_arr[i], rmem->dma_arr[i]);
c0c050c5 2621
6fe19886 2622 rmem->pg_arr[i] = NULL;
c0c050c5 2623 }
6fe19886 2624 if (rmem->pg_tbl) {
4f49b2b8
MC
2625 size_t pg_tbl_size = rmem->nr_pages * 8;
2626
2627 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2628 pg_tbl_size = rmem->page_size;
2629 dma_free_coherent(&pdev->dev, pg_tbl_size,
6fe19886
MC
2630 rmem->pg_tbl, rmem->pg_tbl_map);
2631 rmem->pg_tbl = NULL;
c0c050c5 2632 }
6fe19886
MC
2633 if (rmem->vmem_size && *rmem->vmem) {
2634 vfree(*rmem->vmem);
2635 *rmem->vmem = NULL;
c0c050c5
MC
2636 }
2637}
2638
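/* Allocate the DMA-coherent pages described by a bnxt_ring_mem_info.
 * Multi-page or multi-level (depth > 0) memory also gets a page table;
 * for rings that use hardware PTEs, the valid/next-to-last/last PTU bits
 * are set on the page table entries.  vmem, when requested, is the
 * software shadow ring allocated with vzalloc().
 */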
6fe19886 2639static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5 2640{
c0c050c5 2641 struct pci_dev *pdev = bp->pdev;
66cca20a 2642 u64 valid_bit = 0;
6fe19886 2643 int i;
c0c050c5 2644
66cca20a
MC
2645 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2646 valid_bit = PTU_PTE_VALID;
4f49b2b8
MC
2647 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2648 size_t pg_tbl_size = rmem->nr_pages * 8;
2649
2650 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2651 pg_tbl_size = rmem->page_size;
2652 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
6fe19886 2653 &rmem->pg_tbl_map,
c0c050c5 2654 GFP_KERNEL);
6fe19886 2655 if (!rmem->pg_tbl)
c0c050c5
MC
2656 return -ENOMEM;
2657 }
2658
6fe19886 2659 for (i = 0; i < rmem->nr_pages; i++) {
66cca20a
MC
2660 u64 extra_bits = valid_bit;
2661
6fe19886
MC
2662 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2663 rmem->page_size,
2664 &rmem->dma_arr[i],
c0c050c5 2665 GFP_KERNEL);
6fe19886 2666 if (!rmem->pg_arr[i])
c0c050c5
MC
2667 return -ENOMEM;
2668
4f49b2b8 2669 if (rmem->nr_pages > 1 || rmem->depth > 0) {
66cca20a
MC
2670 if (i == rmem->nr_pages - 2 &&
2671 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2672 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2673 else if (i == rmem->nr_pages - 1 &&
2674 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2675 extra_bits |= PTU_PTE_LAST;
2676 rmem->pg_tbl[i] =
2677 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2678 }
c0c050c5
MC
2679 }
2680
6fe19886
MC
2681 if (rmem->vmem_size) {
2682 *rmem->vmem = vzalloc(rmem->vmem_size);
2683 if (!(*rmem->vmem))
c0c050c5
MC
2684 return -ENOMEM;
2685 }
2686 return 0;
2687}
2688
4a228a3a
MC
2689static void bnxt_free_tpa_info(struct bnxt *bp)
2690{
2691 int i;
2692
2693 for (i = 0; i < bp->rx_nr_rings; i++) {
2694 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2695
ec4d8e7c
MC
2696 kfree(rxr->rx_tpa_idx_map);
2697 rxr->rx_tpa_idx_map = NULL;
79632e9b
MC
2698 if (rxr->rx_tpa) {
2699 kfree(rxr->rx_tpa[0].agg_arr);
2700 rxr->rx_tpa[0].agg_arr = NULL;
2701 }
4a228a3a
MC
2702 kfree(rxr->rx_tpa);
2703 rxr->rx_tpa = NULL;
2704 }
2705}
2706
2707static int bnxt_alloc_tpa_info(struct bnxt *bp)
2708{
79632e9b
MC
2709 int i, j, total_aggs = 0;
2710
2711 bp->max_tpa = MAX_TPA;
2712 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2713 if (!bp->max_tpa_v2)
2714 return 0;
2715 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2716 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2717 }
4a228a3a
MC
2718
2719 for (i = 0; i < bp->rx_nr_rings; i++) {
2720 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
79632e9b 2721 struct rx_agg_cmp *agg;
4a228a3a 2722
79632e9b 2723 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
4a228a3a
MC
2724 GFP_KERNEL);
2725 if (!rxr->rx_tpa)
2726 return -ENOMEM;
79632e9b
MC
2727
2728 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2729 continue;
2730 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2731 rxr->rx_tpa[0].agg_arr = agg;
2732 if (!agg)
2733 return -ENOMEM;
2734 for (j = 1; j < bp->max_tpa; j++)
2735 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
ec4d8e7c
MC
2736 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2737 GFP_KERNEL);
2738 if (!rxr->rx_tpa_idx_map)
2739 return -ENOMEM;
4a228a3a
MC
2740 }
2741 return 0;
2742}
2743
c0c050c5
MC
2744static void bnxt_free_rx_rings(struct bnxt *bp)
2745{
2746 int i;
2747
b6ab4b01 2748 if (!bp->rx_ring)
c0c050c5
MC
2749 return;
2750
4a228a3a 2751 bnxt_free_tpa_info(bp);
c0c050c5 2752 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2753 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2754 struct bnxt_ring_struct *ring;
2755
c6d30e83
MC
2756 if (rxr->xdp_prog)
2757 bpf_prog_put(rxr->xdp_prog);
2758
96a8604f
JDB
2759 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2760 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2761
12479f62 2762 page_pool_destroy(rxr->page_pool);
322b87ca
AG
2763 rxr->page_pool = NULL;
2764
c0c050c5
MC
2765 kfree(rxr->rx_agg_bmap);
2766 rxr->rx_agg_bmap = NULL;
2767
2768 ring = &rxr->rx_ring_struct;
6fe19886 2769 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2770
2771 ring = &rxr->rx_agg_ring_struct;
6fe19886 2772 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2773 }
2774}
2775
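/* Each RX ring owns a page_pool sized to the ring and bound to the
 * device's NUMA node.  bnxt_alloc_rx_rings() registers it as the
 * MEM_TYPE_PAGE_POOL memory model of the ring's xdp_rxq so that pages
 * allocated for RX buffers can be recycled.
 */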
322b87ca
AG
2776static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2777 struct bnxt_rx_ring_info *rxr)
2778{
2779 struct page_pool_params pp = { 0 };
2780
2781 pp.pool_size = bp->rx_ring_size;
2782 pp.nid = dev_to_node(&bp->pdev->dev);
2783 pp.dev = &bp->pdev->dev;
2784 pp.dma_dir = DMA_BIDIRECTIONAL;
2785
2786 rxr->page_pool = page_pool_create(&pp);
2787 if (IS_ERR(rxr->page_pool)) {
2788 int err = PTR_ERR(rxr->page_pool);
2789
2790 rxr->page_pool = NULL;
2791 return err;
2792 }
2793 return 0;
2794}
2795
c0c050c5
MC
2796static int bnxt_alloc_rx_rings(struct bnxt *bp)
2797{
4a228a3a 2798 int i, rc = 0, agg_rings = 0;
c0c050c5 2799
b6ab4b01
MC
2800 if (!bp->rx_ring)
2801 return -ENOMEM;
2802
c0c050c5
MC
2803 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2804 agg_rings = 1;
2805
c0c050c5 2806 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2807 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2808 struct bnxt_ring_struct *ring;
2809
c0c050c5
MC
2810 ring = &rxr->rx_ring_struct;
2811
322b87ca
AG
2812 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2813 if (rc)
2814 return rc;
2815
96a8604f 2816 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
12479f62 2817 if (rc < 0)
96a8604f
JDB
2818 return rc;
2819
f18c2b77 2820 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
322b87ca
AG
2821 MEM_TYPE_PAGE_POOL,
2822 rxr->page_pool);
f18c2b77
AG
2823 if (rc) {
2824 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2825 return rc;
2826 }
2827
6fe19886 2828 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2829 if (rc)
2830 return rc;
2831
2c61d211 2832 ring->grp_idx = i;
c0c050c5
MC
2833 if (agg_rings) {
2834 u16 mem_size;
2835
2836 ring = &rxr->rx_agg_ring_struct;
6fe19886 2837 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2838 if (rc)
2839 return rc;
2840
9899bb59 2841 ring->grp_idx = i;
c0c050c5
MC
2842 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2843 mem_size = rxr->rx_agg_bmap_size / 8;
2844 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2845 if (!rxr->rx_agg_bmap)
2846 return -ENOMEM;
c0c050c5
MC
2847 }
2848 }
4a228a3a
MC
2849 if (bp->flags & BNXT_FLAG_TPA)
2850 rc = bnxt_alloc_tpa_info(bp);
2851 return rc;
c0c050c5
MC
2852}
2853
2854static void bnxt_free_tx_rings(struct bnxt *bp)
2855{
2856 int i;
2857 struct pci_dev *pdev = bp->pdev;
2858
b6ab4b01 2859 if (!bp->tx_ring)
c0c050c5
MC
2860 return;
2861
2862 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2863 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2864 struct bnxt_ring_struct *ring;
2865
c0c050c5
MC
2866 if (txr->tx_push) {
2867 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2868 txr->tx_push, txr->tx_push_mapping);
2869 txr->tx_push = NULL;
2870 }
2871
2872 ring = &txr->tx_ring_struct;
2873
6fe19886 2874 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2875 }
2876}
2877
2878static int bnxt_alloc_tx_rings(struct bnxt *bp)
2879{
2880 int i, j, rc;
2881 struct pci_dev *pdev = bp->pdev;
2882
2883 bp->tx_push_size = 0;
2884 if (bp->tx_push_thresh) {
2885 int push_size;
2886
2887 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2888 bp->tx_push_thresh);
2889
4419dbe6 2890 if (push_size > 256) {
c0c050c5
MC
2891 push_size = 0;
2892 bp->tx_push_thresh = 0;
2893 }
2894
2895 bp->tx_push_size = push_size;
2896 }
2897
2898 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2899 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5 2900 struct bnxt_ring_struct *ring;
2e8ef77e 2901 u8 qidx;
c0c050c5 2902
c0c050c5
MC
2903 ring = &txr->tx_ring_struct;
2904
6fe19886 2905 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2906 if (rc)
2907 return rc;
2908
9899bb59 2909 ring->grp_idx = txr->bnapi->index;
c0c050c5 2910 if (bp->tx_push_size) {
c0c050c5
MC
2911 dma_addr_t mapping;
2912
 2913 /* One pre-allocated DMA buffer to back up the
2914 * TX push operation
2915 */
2916 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2917 bp->tx_push_size,
2918 &txr->tx_push_mapping,
2919 GFP_KERNEL);
2920
2921 if (!txr->tx_push)
2922 return -ENOMEM;
2923
c0c050c5
MC
2924 mapping = txr->tx_push_mapping +
2925 sizeof(struct tx_push_bd);
4419dbe6 2926 txr->data_mapping = cpu_to_le64(mapping);
c0c050c5 2927 }
2e8ef77e
MC
2928 qidx = bp->tc_to_qidx[j];
2929 ring->queue_id = bp->q_info[qidx].queue_id;
5f449249
MC
2930 if (i < bp->tx_nr_rings_xdp)
2931 continue;
c0c050c5
MC
2932 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2933 j++;
2934 }
2935 return 0;
2936}
2937
2938static void bnxt_free_cp_rings(struct bnxt *bp)
2939{
2940 int i;
2941
2942 if (!bp->bnapi)
2943 return;
2944
2945 for (i = 0; i < bp->cp_nr_rings; i++) {
2946 struct bnxt_napi *bnapi = bp->bnapi[i];
2947 struct bnxt_cp_ring_info *cpr;
2948 struct bnxt_ring_struct *ring;
50e3ab78 2949 int j;
c0c050c5
MC
2950
2951 if (!bnapi)
2952 continue;
2953
2954 cpr = &bnapi->cp_ring;
2955 ring = &cpr->cp_ring_struct;
2956
6fe19886 2957 bnxt_free_ring(bp, &ring->ring_mem);
50e3ab78
MC
2958
2959 for (j = 0; j < 2; j++) {
2960 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2961
2962 if (cpr2) {
2963 ring = &cpr2->cp_ring_struct;
2964 bnxt_free_ring(bp, &ring->ring_mem);
2965 kfree(cpr2);
2966 cpr->cp_ring_arr[j] = NULL;
2967 }
2968 }
c0c050c5
MC
2969 }
2970}
2971
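/* On 57500 chips each NAPI instance owns an NQ plus up to two sub
 * completion rings: cp_ring_arr[BNXT_RX_HDL] for RX and
 * cp_ring_arr[BNXT_TX_HDL] for TX.  bnxt_alloc_cp_sub_ring() allocates
 * one such sub-ring; bnxt_alloc_cp_rings() attaches them based on how
 * RX and TX rings are distributed across the vectors.
 */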
50e3ab78
MC
2972static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2973{
2974 struct bnxt_ring_mem_info *rmem;
2975 struct bnxt_ring_struct *ring;
2976 struct bnxt_cp_ring_info *cpr;
2977 int rc;
2978
2979 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
2980 if (!cpr)
2981 return NULL;
2982
2983 ring = &cpr->cp_ring_struct;
2984 rmem = &ring->ring_mem;
2985 rmem->nr_pages = bp->cp_nr_pages;
2986 rmem->page_size = HW_CMPD_RING_SIZE;
2987 rmem->pg_arr = (void **)cpr->cp_desc_ring;
2988 rmem->dma_arr = cpr->cp_desc_mapping;
2989 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
2990 rc = bnxt_alloc_ring(bp, rmem);
2991 if (rc) {
2992 bnxt_free_ring(bp, rmem);
2993 kfree(cpr);
2994 cpr = NULL;
2995 }
2996 return cpr;
2997}
2998
c0c050c5
MC
2999static int bnxt_alloc_cp_rings(struct bnxt *bp)
3000{
50e3ab78 3001 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
e5811b8c 3002 int i, rc, ulp_base_vec, ulp_msix;
c0c050c5 3003
e5811b8c
MC
3004 ulp_msix = bnxt_get_ulp_msix_num(bp);
3005 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
c0c050c5
MC
3006 for (i = 0; i < bp->cp_nr_rings; i++) {
3007 struct bnxt_napi *bnapi = bp->bnapi[i];
3008 struct bnxt_cp_ring_info *cpr;
3009 struct bnxt_ring_struct *ring;
3010
3011 if (!bnapi)
3012 continue;
3013
3014 cpr = &bnapi->cp_ring;
50e3ab78 3015 cpr->bnapi = bnapi;
c0c050c5
MC
3016 ring = &cpr->cp_ring_struct;
3017
6fe19886 3018 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
3019 if (rc)
3020 return rc;
e5811b8c
MC
3021
3022 if (ulp_msix && i >= ulp_base_vec)
3023 ring->map_idx = i + ulp_msix;
3024 else
3025 ring->map_idx = i;
50e3ab78
MC
3026
3027 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3028 continue;
3029
3030 if (i < bp->rx_nr_rings) {
3031 struct bnxt_cp_ring_info *cpr2 =
3032 bnxt_alloc_cp_sub_ring(bp);
3033
3034 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3035 if (!cpr2)
3036 return -ENOMEM;
3037 cpr2->bnapi = bnapi;
3038 }
3039 if ((sh && i < bp->tx_nr_rings) ||
3040 (!sh && i >= bp->rx_nr_rings)) {
3041 struct bnxt_cp_ring_info *cpr2 =
3042 bnxt_alloc_cp_sub_ring(bp);
3043
3044 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3045 if (!cpr2)
3046 return -ENOMEM;
3047 cpr2->bnapi = bnapi;
3048 }
c0c050c5
MC
3049 }
3050 return 0;
3051}
3052
3053static void bnxt_init_ring_struct(struct bnxt *bp)
3054{
3055 int i;
3056
3057 for (i = 0; i < bp->cp_nr_rings; i++) {
3058 struct bnxt_napi *bnapi = bp->bnapi[i];
6fe19886 3059 struct bnxt_ring_mem_info *rmem;
c0c050c5
MC
3060 struct bnxt_cp_ring_info *cpr;
3061 struct bnxt_rx_ring_info *rxr;
3062 struct bnxt_tx_ring_info *txr;
3063 struct bnxt_ring_struct *ring;
3064
3065 if (!bnapi)
3066 continue;
3067
3068 cpr = &bnapi->cp_ring;
3069 ring = &cpr->cp_ring_struct;
6fe19886
MC
3070 rmem = &ring->ring_mem;
3071 rmem->nr_pages = bp->cp_nr_pages;
3072 rmem->page_size = HW_CMPD_RING_SIZE;
3073 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3074 rmem->dma_arr = cpr->cp_desc_mapping;
3075 rmem->vmem_size = 0;
c0c050c5 3076
b6ab4b01 3077 rxr = bnapi->rx_ring;
3b2b7d9d
MC
3078 if (!rxr)
3079 goto skip_rx;
3080
c0c050c5 3081 ring = &rxr->rx_ring_struct;
6fe19886
MC
3082 rmem = &ring->ring_mem;
3083 rmem->nr_pages = bp->rx_nr_pages;
3084 rmem->page_size = HW_RXBD_RING_SIZE;
3085 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3086 rmem->dma_arr = rxr->rx_desc_mapping;
3087 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3088 rmem->vmem = (void **)&rxr->rx_buf_ring;
c0c050c5
MC
3089
3090 ring = &rxr->rx_agg_ring_struct;
6fe19886
MC
3091 rmem = &ring->ring_mem;
3092 rmem->nr_pages = bp->rx_agg_nr_pages;
3093 rmem->page_size = HW_RXBD_RING_SIZE;
3094 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3095 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3096 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3097 rmem->vmem = (void **)&rxr->rx_agg_ring;
c0c050c5 3098
3b2b7d9d 3099skip_rx:
b6ab4b01 3100 txr = bnapi->tx_ring;
3b2b7d9d
MC
3101 if (!txr)
3102 continue;
3103
c0c050c5 3104 ring = &txr->tx_ring_struct;
6fe19886
MC
3105 rmem = &ring->ring_mem;
3106 rmem->nr_pages = bp->tx_nr_pages;
3107 rmem->page_size = HW_RXBD_RING_SIZE;
3108 rmem->pg_arr = (void **)txr->tx_desc_ring;
3109 rmem->dma_arr = txr->tx_desc_mapping;
3110 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3111 rmem->vmem = (void **)&txr->tx_buf_ring;
c0c050c5
MC
3112 }
3113}
3114
3115static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3116{
3117 int i;
3118 u32 prod;
3119 struct rx_bd **rx_buf_ring;
3120
6fe19886
MC
3121 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3122 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
c0c050c5
MC
3123 int j;
3124 struct rx_bd *rxbd;
3125
3126 rxbd = rx_buf_ring[i];
3127 if (!rxbd)
3128 continue;
3129
3130 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3131 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3132 rxbd->rx_bd_opaque = prod;
3133 }
3134 }
3135}
3136
3137static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3138{
3139 struct net_device *dev = bp->dev;
c0c050c5
MC
3140 struct bnxt_rx_ring_info *rxr;
3141 struct bnxt_ring_struct *ring;
3142 u32 prod, type;
3143 int i;
3144
c0c050c5
MC
3145 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3146 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3147
3148 if (NET_IP_ALIGN == 2)
3149 type |= RX_BD_FLAGS_SOP;
3150
b6ab4b01 3151 rxr = &bp->rx_ring[ring_nr];
c0c050c5
MC
3152 ring = &rxr->rx_ring_struct;
3153 bnxt_init_rxbd_pages(ring, type);
3154
c6d30e83
MC
3155 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3156 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
3157 if (IS_ERR(rxr->xdp_prog)) {
3158 int rc = PTR_ERR(rxr->xdp_prog);
3159
3160 rxr->xdp_prog = NULL;
3161 return rc;
3162 }
3163 }
c0c050c5
MC
3164 prod = rxr->rx_prod;
3165 for (i = 0; i < bp->rx_ring_size; i++) {
3166 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
3167 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3168 ring_nr, i, bp->rx_ring_size);
3169 break;
3170 }
3171 prod = NEXT_RX(prod);
3172 }
3173 rxr->rx_prod = prod;
3174 ring->fw_ring_id = INVALID_HW_RING_ID;
3175
edd0c2cc
MC
3176 ring = &rxr->rx_agg_ring_struct;
3177 ring->fw_ring_id = INVALID_HW_RING_ID;
3178
c0c050c5
MC
3179 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3180 return 0;
3181
2839f28b 3182 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
c0c050c5
MC
3183 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3184
3185 bnxt_init_rxbd_pages(ring, type);
3186
3187 prod = rxr->rx_agg_prod;
3188 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3189 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
3190 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
 3191 ring_nr, i, bp->rx_agg_ring_size);
3192 break;
3193 }
3194 prod = NEXT_RX_AGG(prod);
3195 }
3196 rxr->rx_agg_prod = prod;
c0c050c5
MC
3197
3198 if (bp->flags & BNXT_FLAG_TPA) {
3199 if (rxr->rx_tpa) {
3200 u8 *data;
3201 dma_addr_t mapping;
3202
79632e9b 3203 for (i = 0; i < bp->max_tpa; i++) {
c0c050c5
MC
3204 data = __bnxt_alloc_rx_data(bp, &mapping,
3205 GFP_KERNEL);
3206 if (!data)
3207 return -ENOMEM;
3208
3209 rxr->rx_tpa[i].data = data;
b3dba77c 3210 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
c0c050c5
MC
3211 rxr->rx_tpa[i].mapping = mapping;
3212 }
3213 } else {
3214 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
3215 return -ENOMEM;
3216 }
3217 }
3218
3219 return 0;
3220}
3221
2247925f
SP
3222static void bnxt_init_cp_rings(struct bnxt *bp)
3223{
3e08b184 3224 int i, j;
2247925f
SP
3225
3226 for (i = 0; i < bp->cp_nr_rings; i++) {
3227 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3228 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3229
3230 ring->fw_ring_id = INVALID_HW_RING_ID;
6a8788f2
AG
3231 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3232 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3e08b184
MC
3233 for (j = 0; j < 2; j++) {
3234 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3235
3236 if (!cpr2)
3237 continue;
3238
3239 ring = &cpr2->cp_ring_struct;
3240 ring->fw_ring_id = INVALID_HW_RING_ID;
3241 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3242 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3243 }
2247925f
SP
3244 }
3245}
3246
c0c050c5
MC
3247static int bnxt_init_rx_rings(struct bnxt *bp)
3248{
3249 int i, rc = 0;
3250
c61fb99c 3251 if (BNXT_RX_PAGE_MODE(bp)) {
c6d30e83
MC
3252 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3253 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
c61fb99c
MC
3254 } else {
3255 bp->rx_offset = BNXT_RX_OFFSET;
3256 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3257 }
b3dba77c 3258
c0c050c5
MC
3259 for (i = 0; i < bp->rx_nr_rings; i++) {
3260 rc = bnxt_init_one_rx_ring(bp, i);
3261 if (rc)
3262 break;
3263 }
3264
3265 return rc;
3266}
3267
3268static int bnxt_init_tx_rings(struct bnxt *bp)
3269{
3270 u16 i;
3271
3272 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3273 MAX_SKB_FRAGS + 1);
3274
3275 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3276 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3277 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3278
3279 ring->fw_ring_id = INVALID_HW_RING_ID;
3280 }
3281
3282 return 0;
3283}
3284
3285static void bnxt_free_ring_grps(struct bnxt *bp)
3286{
3287 kfree(bp->grp_info);
3288 bp->grp_info = NULL;
3289}
3290
3291static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3292{
3293 int i;
3294
3295 if (irq_re_init) {
3296 bp->grp_info = kcalloc(bp->cp_nr_rings,
3297 sizeof(struct bnxt_ring_grp_info),
3298 GFP_KERNEL);
3299 if (!bp->grp_info)
3300 return -ENOMEM;
3301 }
3302 for (i = 0; i < bp->cp_nr_rings; i++) {
3303 if (irq_re_init)
3304 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3305 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3306 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3307 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3308 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3309 }
3310 return 0;
3311}
3312
3313static void bnxt_free_vnics(struct bnxt *bp)
3314{
3315 kfree(bp->vnic_info);
3316 bp->vnic_info = NULL;
3317 bp->nr_vnics = 0;
3318}
3319
3320static int bnxt_alloc_vnics(struct bnxt *bp)
3321{
3322 int num_vnics = 1;
3323
3324#ifdef CONFIG_RFS_ACCEL
9b3d15e6 3325 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
c0c050c5
MC
3326 num_vnics += bp->rx_nr_rings;
3327#endif
3328
dc52c6c7
PS
3329 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3330 num_vnics++;
3331
c0c050c5
MC
3332 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3333 GFP_KERNEL);
3334 if (!bp->vnic_info)
3335 return -ENOMEM;
3336
3337 bp->nr_vnics = num_vnics;
3338 return 0;
3339}
3340
3341static void bnxt_init_vnics(struct bnxt *bp)
3342{
3343 int i;
3344
3345 for (i = 0; i < bp->nr_vnics; i++) {
3346 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
44c6f72a 3347 int j;
c0c050c5
MC
3348
3349 vnic->fw_vnic_id = INVALID_HW_RING_ID;
44c6f72a
MC
3350 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3351 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3352
c0c050c5
MC
3353 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3354
3355 if (bp->vnic_info[i].rss_hash_key) {
3356 if (i == 0)
3357 prandom_bytes(vnic->rss_hash_key,
3358 HW_HASH_KEY_SIZE);
3359 else
3360 memcpy(vnic->rss_hash_key,
3361 bp->vnic_info[0].rss_hash_key,
3362 HW_HASH_KEY_SIZE);
3363 }
3364 }
3365}
3366
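/* Round a ring of ring_size entries up to a power-of-two number of
 * descriptor pages, always leaving at least one page of slack: for
 * example, 511 entries at 64 descriptors per page gives 7 full pages,
 * bumped to 8; 128 entries at 64 per page gives 2, bumped to 4.
 */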
3367static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3368{
3369 int pages;
3370
3371 pages = ring_size / desc_per_pg;
3372
3373 if (!pages)
3374 return 1;
3375
3376 pages++;
3377
3378 while (pages & (pages - 1))
3379 pages++;
3380
3381 return pages;
3382}
3383
c6d30e83 3384void bnxt_set_tpa_flags(struct bnxt *bp)
c0c050c5
MC
3385{
3386 bp->flags &= ~BNXT_FLAG_TPA;
341138c3
MC
3387 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3388 return;
c0c050c5
MC
3389 if (bp->dev->features & NETIF_F_LRO)
3390 bp->flags |= BNXT_FLAG_LRO;
1054aee8 3391 else if (bp->dev->features & NETIF_F_GRO_HW)
c0c050c5
MC
3392 bp->flags |= BNXT_FLAG_GRO;
3393}
3394
3395/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3396 * be set on entry.
3397 */
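/* The RX buffer is sized for a full MTU frame unless aggregation rings
 * are in use (TPA, or an MTU whose buffer would exceed a page), in which
 * case the data buffer shrinks back to the copy threshold and the
 * overflow lands in aggregation pages.  The completion ring is sized to
 * absorb the worst case of RX, aggregation and TX completions:
 * rx_ring_size * (2 + agg_factor) + tx_ring_size.
 */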
3398void bnxt_set_ring_params(struct bnxt *bp)
3399{
3400 u32 ring_size, rx_size, rx_space;
3401 u32 agg_factor = 0, agg_ring_size = 0;
3402
3403 /* 8 for CRC and VLAN */
3404 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3405
3406 rx_space = rx_size + NET_SKB_PAD +
3407 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3408
3409 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3410 ring_size = bp->rx_ring_size;
3411 bp->rx_agg_ring_size = 0;
3412 bp->rx_agg_nr_pages = 0;
3413
3414 if (bp->flags & BNXT_FLAG_TPA)
2839f28b 3415 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
c0c050c5
MC
3416
3417 bp->flags &= ~BNXT_FLAG_JUMBO;
bdbd1eb5 3418 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
c0c050c5
MC
3419 u32 jumbo_factor;
3420
3421 bp->flags |= BNXT_FLAG_JUMBO;
3422 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3423 if (jumbo_factor > agg_factor)
3424 agg_factor = jumbo_factor;
3425 }
3426 agg_ring_size = ring_size * agg_factor;
3427
3428 if (agg_ring_size) {
3429 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3430 RX_DESC_CNT);
3431 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3432 u32 tmp = agg_ring_size;
3433
3434 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3435 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3436 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3437 tmp, agg_ring_size);
3438 }
3439 bp->rx_agg_ring_size = agg_ring_size;
3440 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3441 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3442 rx_space = rx_size + NET_SKB_PAD +
3443 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3444 }
3445
3446 bp->rx_buf_use_size = rx_size;
3447 bp->rx_buf_size = rx_space;
3448
3449 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3450 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3451
3452 ring_size = bp->tx_ring_size;
3453 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3454 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3455
3456 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3457 bp->cp_ring_size = ring_size;
3458
3459 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3460 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3461 bp->cp_nr_pages = MAX_CP_PAGES;
3462 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3463 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3464 ring_size, bp->cp_ring_size);
3465 }
3466 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3467 bp->cp_ring_mask = bp->cp_bit - 1;
3468}
3469
96a8604f
JDB
3470/* Changing allocation mode of RX rings.
3471 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3472 */
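/* Page mode (used when an XDP program is attached) caps the MTU at
 * BNXT_MAX_PAGE_MODE_MTU, turns off the aggregation rings, maps RX
 * buffers DMA_BIDIRECTIONAL (needed for XDP_TX), and switches
 * rx_skb_func to the page-based handler; LRO/GRO_HW are then dropped by
 * netdev_update_features().  Normal mode restores the kmalloc'd data
 * buffer path.
 */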
c61fb99c 3473int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
6bb19474 3474{
c61fb99c
MC
3475 if (page_mode) {
3476 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3477 return -EOPNOTSUPP;
7eb9bb3a
MC
3478 bp->dev->max_mtu =
3479 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
c61fb99c
MC
3480 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3481 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
c61fb99c
MC
3482 bp->rx_dir = DMA_BIDIRECTIONAL;
3483 bp->rx_skb_func = bnxt_rx_page_skb;
1054aee8
MC
3484 /* Disable LRO or GRO_HW */
3485 netdev_update_features(bp->dev);
c61fb99c 3486 } else {
7eb9bb3a 3487 bp->dev->max_mtu = bp->max_mtu;
c61fb99c
MC
3488 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3489 bp->rx_dir = DMA_FROM_DEVICE;
3490 bp->rx_skb_func = bnxt_rx_skb;
3491 }
6bb19474
MC
3492 return 0;
3493}
3494
c0c050c5
MC
3495static void bnxt_free_vnic_attributes(struct bnxt *bp)
3496{
3497 int i;
3498 struct bnxt_vnic_info *vnic;
3499 struct pci_dev *pdev = bp->pdev;
3500
3501 if (!bp->vnic_info)
3502 return;
3503
3504 for (i = 0; i < bp->nr_vnics; i++) {
3505 vnic = &bp->vnic_info[i];
3506
3507 kfree(vnic->fw_grp_ids);
3508 vnic->fw_grp_ids = NULL;
3509
3510 kfree(vnic->uc_list);
3511 vnic->uc_list = NULL;
3512
3513 if (vnic->mc_list) {
3514 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3515 vnic->mc_list, vnic->mc_list_mapping);
3516 vnic->mc_list = NULL;
3517 }
3518
3519 if (vnic->rss_table) {
3520 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3521 vnic->rss_table,
3522 vnic->rss_table_dma_addr);
3523 vnic->rss_table = NULL;
3524 }
3525
3526 vnic->rss_hash_key = NULL;
3527 vnic->flags = 0;
3528 }
3529}
3530
3531static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3532{
3533 int i, rc = 0, size;
3534 struct bnxt_vnic_info *vnic;
3535 struct pci_dev *pdev = bp->pdev;
3536 int max_rings;
3537
3538 for (i = 0; i < bp->nr_vnics; i++) {
3539 vnic = &bp->vnic_info[i];
3540
3541 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3542 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3543
3544 if (mem_size > 0) {
3545 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3546 if (!vnic->uc_list) {
3547 rc = -ENOMEM;
3548 goto out;
3549 }
3550 }
3551 }
3552
3553 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3554 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3555 vnic->mc_list =
3556 dma_alloc_coherent(&pdev->dev,
3557 vnic->mc_list_size,
3558 &vnic->mc_list_mapping,
3559 GFP_KERNEL);
3560 if (!vnic->mc_list) {
3561 rc = -ENOMEM;
3562 goto out;
3563 }
3564 }
3565
44c6f72a
MC
3566 if (bp->flags & BNXT_FLAG_CHIP_P5)
3567 goto vnic_skip_grps;
3568
c0c050c5
MC
3569 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3570 max_rings = bp->rx_nr_rings;
3571 else
3572 max_rings = 1;
3573
3574 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3575 if (!vnic->fw_grp_ids) {
3576 rc = -ENOMEM;
3577 goto out;
3578 }
44c6f72a 3579vnic_skip_grps:
ae10ae74
MC
3580 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3581 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3582 continue;
3583
c0c050c5
MC
3584 /* Allocate rss table and hash key */
3585 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3586 &vnic->rss_table_dma_addr,
3587 GFP_KERNEL);
3588 if (!vnic->rss_table) {
3589 rc = -ENOMEM;
3590 goto out;
3591 }
3592
3593 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3594
3595 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3596 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3597 }
3598 return 0;
3599
3600out:
3601 return rc;
3602}
3603
3604static void bnxt_free_hwrm_resources(struct bnxt *bp)
3605{
3606 struct pci_dev *pdev = bp->pdev;
3607
a2bf74f4
VD
3608 if (bp->hwrm_cmd_resp_addr) {
3609 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3610 bp->hwrm_cmd_resp_dma_addr);
3611 bp->hwrm_cmd_resp_addr = NULL;
3612 }
760b6d33
VD
3613
3614 if (bp->hwrm_cmd_kong_resp_addr) {
3615 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3616 bp->hwrm_cmd_kong_resp_addr,
3617 bp->hwrm_cmd_kong_resp_dma_addr);
3618 bp->hwrm_cmd_kong_resp_addr = NULL;
3619 }
3620}
3621
3622static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3623{
3624 struct pci_dev *pdev = bp->pdev;
3625
ba642ab7
MC
3626 if (bp->hwrm_cmd_kong_resp_addr)
3627 return 0;
3628
760b6d33
VD
3629 bp->hwrm_cmd_kong_resp_addr =
3630 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3631 &bp->hwrm_cmd_kong_resp_dma_addr,
3632 GFP_KERNEL);
3633 if (!bp->hwrm_cmd_kong_resp_addr)
3634 return -ENOMEM;
3635
3636 return 0;
c0c050c5
MC
3637}
3638
3639static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3640{
3641 struct pci_dev *pdev = bp->pdev;
3642
3643 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3644 &bp->hwrm_cmd_resp_dma_addr,
3645 GFP_KERNEL);
3646 if (!bp->hwrm_cmd_resp_addr)
3647 return -ENOMEM;
c0c050c5
MC
3648
3649 return 0;
3650}
3651
e605db80
DK
3652static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3653{
3654 if (bp->hwrm_short_cmd_req_addr) {
3655 struct pci_dev *pdev = bp->pdev;
3656
1dfddc41 3657 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
e605db80
DK
3658 bp->hwrm_short_cmd_req_addr,
3659 bp->hwrm_short_cmd_req_dma_addr);
3660 bp->hwrm_short_cmd_req_addr = NULL;
3661 }
3662}
3663
3664static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3665{
3666 struct pci_dev *pdev = bp->pdev;
3667
ba642ab7
MC
3668 if (bp->hwrm_short_cmd_req_addr)
3669 return 0;
3670
e605db80 3671 bp->hwrm_short_cmd_req_addr =
1dfddc41 3672 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
e605db80
DK
3673 &bp->hwrm_short_cmd_req_dma_addr,
3674 GFP_KERNEL);
3675 if (!bp->hwrm_short_cmd_req_addr)
3676 return -ENOMEM;
3677
3678 return 0;
3679}
3680
fd3ab1c7 3681static void bnxt_free_port_stats(struct bnxt *bp)
c0c050c5 3682{
c0c050c5
MC
3683 struct pci_dev *pdev = bp->pdev;
3684
00db3cba
VV
3685 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3686 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3687
3bdf56c4
MC
3688 if (bp->hw_rx_port_stats) {
3689 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3690 bp->hw_rx_port_stats,
3691 bp->hw_rx_port_stats_map);
3692 bp->hw_rx_port_stats = NULL;
00db3cba
VV
3693 }
3694
36e53349
MC
3695 if (bp->hw_tx_port_stats_ext) {
3696 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3697 bp->hw_tx_port_stats_ext,
3698 bp->hw_tx_port_stats_ext_map);
3699 bp->hw_tx_port_stats_ext = NULL;
3700 }
3701
00db3cba
VV
3702 if (bp->hw_rx_port_stats_ext) {
3703 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3704 bp->hw_rx_port_stats_ext,
3705 bp->hw_rx_port_stats_ext_map);
3706 bp->hw_rx_port_stats_ext = NULL;
3bdf56c4 3707 }
55e4398d
VV
3708
3709 if (bp->hw_pcie_stats) {
3710 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3711 bp->hw_pcie_stats, bp->hw_pcie_stats_map);
3712 bp->hw_pcie_stats = NULL;
3713 }
fd3ab1c7
MC
3714}
3715
3716static void bnxt_free_ring_stats(struct bnxt *bp)
3717{
3718 struct pci_dev *pdev = bp->pdev;
3719 int size, i;
3bdf56c4 3720
c0c050c5
MC
3721 if (!bp->bnapi)
3722 return;
3723
4e748506 3724 size = bp->hw_ring_stats_size;
c0c050c5
MC
3725
3726 for (i = 0; i < bp->cp_nr_rings; i++) {
3727 struct bnxt_napi *bnapi = bp->bnapi[i];
3728 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3729
3730 if (cpr->hw_stats) {
3731 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3732 cpr->hw_stats_map);
3733 cpr->hw_stats = NULL;
3734 }
3735 }
3736}
3737
3738static int bnxt_alloc_stats(struct bnxt *bp)
3739{
3740 u32 size, i;
3741 struct pci_dev *pdev = bp->pdev;
3742
4e748506 3743 size = bp->hw_ring_stats_size;
c0c050c5
MC
3744
3745 for (i = 0; i < bp->cp_nr_rings; i++) {
3746 struct bnxt_napi *bnapi = bp->bnapi[i];
3747 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3748
3749 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3750 &cpr->hw_stats_map,
3751 GFP_KERNEL);
3752 if (!cpr->hw_stats)
3753 return -ENOMEM;
3754
3755 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3756 }
3bdf56c4 3757
a220eabc
VV
3758 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
3759 return 0;
fd3ab1c7 3760
a220eabc
VV
3761 if (bp->hw_rx_port_stats)
3762 goto alloc_ext_stats;
3bdf56c4 3763
a220eabc
VV
3764 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3765 sizeof(struct tx_port_stats) + 1024;
3bdf56c4 3766
a220eabc
VV
3767 bp->hw_rx_port_stats =
3768 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3769 &bp->hw_rx_port_stats_map,
3770 GFP_KERNEL);
3771 if (!bp->hw_rx_port_stats)
3772 return -ENOMEM;
3bdf56c4 3773
a220eabc
VV
3774 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
3775 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3776 sizeof(struct rx_port_stats) + 512;
3777 bp->flags |= BNXT_FLAG_PORT_STATS;
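/* Layout note: the port stats are carved out of one DMA block of
 * hw_port_stats_size bytes -- the RX stats, a 512-byte gap, then the
 * TX stats -- which is why the TX pointer and DMA address above are
 * derived from the RX allocation rather than allocated separately.
 */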
00db3cba 3778
fd3ab1c7 3779alloc_ext_stats:
a220eabc
VV
3780 /* Display extended statistics only if FW supports it */
3781 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
6154532f 3782 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
00db3cba
VV
3783 return 0;
3784
a220eabc
VV
3785 if (bp->hw_rx_port_stats_ext)
3786 goto alloc_tx_ext_stats;
fd3ab1c7 3787
a220eabc
VV
3788 bp->hw_rx_port_stats_ext =
3789 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3790 &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
3791 if (!bp->hw_rx_port_stats_ext)
3792 return 0;
00db3cba 3793
fd3ab1c7 3794alloc_tx_ext_stats:
a220eabc 3795 if (bp->hw_tx_port_stats_ext)
55e4398d 3796 goto alloc_pcie_stats;
fd3ab1c7 3797
6154532f
VV
3798 if (bp->hwrm_spec_code >= 0x10902 ||
3799 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
a220eabc
VV
3800 bp->hw_tx_port_stats_ext =
3801 dma_alloc_coherent(&pdev->dev,
3802 sizeof(struct tx_port_stats_ext),
3803 &bp->hw_tx_port_stats_ext_map,
3804 GFP_KERNEL);
3bdf56c4 3805 }
a220eabc 3806 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
55e4398d
VV
3807
3808alloc_pcie_stats:
3809 if (bp->hw_pcie_stats ||
3810 !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
3811 return 0;
3812
3813 bp->hw_pcie_stats =
3814 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3815 &bp->hw_pcie_stats_map, GFP_KERNEL);
3816 if (!bp->hw_pcie_stats)
3817 return 0;
3818
3819 bp->flags |= BNXT_FLAG_PCIE_STATS;
c0c050c5
MC
3820 return 0;
3821}
3822
3823static void bnxt_clear_ring_indices(struct bnxt *bp)
3824{
3825 int i;
3826
3827 if (!bp->bnapi)
3828 return;
3829
3830 for (i = 0; i < bp->cp_nr_rings; i++) {
3831 struct bnxt_napi *bnapi = bp->bnapi[i];
3832 struct bnxt_cp_ring_info *cpr;
3833 struct bnxt_rx_ring_info *rxr;
3834 struct bnxt_tx_ring_info *txr;
3835
3836 if (!bnapi)
3837 continue;
3838
3839 cpr = &bnapi->cp_ring;
3840 cpr->cp_raw_cons = 0;
3841
b6ab4b01 3842 txr = bnapi->tx_ring;
3b2b7d9d
MC
3843 if (txr) {
3844 txr->tx_prod = 0;
3845 txr->tx_cons = 0;
3846 }
c0c050c5 3847
b6ab4b01 3848 rxr = bnapi->rx_ring;
3b2b7d9d
MC
3849 if (rxr) {
3850 rxr->rx_prod = 0;
3851 rxr->rx_agg_prod = 0;
3852 rxr->rx_sw_agg_prod = 0;
376a5b86 3853 rxr->rx_next_cons = 0;
3b2b7d9d 3854 }
c0c050c5
MC
3855 }
3856}
3857
3858static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3859{
3860#ifdef CONFIG_RFS_ACCEL
3861 int i;
3862
3863 /* We are under rtnl_lock and all our NAPIs have been disabled. It's
3864 * safe to delete the hash table.
3865 */
3866 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3867 struct hlist_head *head;
3868 struct hlist_node *tmp;
3869 struct bnxt_ntuple_filter *fltr;
3870
3871 head = &bp->ntp_fltr_hash_tbl[i];
3872 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3873 hlist_del(&fltr->hash);
3874 kfree(fltr);
3875 }
3876 }
3877 if (irq_reinit) {
3878 kfree(bp->ntp_fltr_bmap);
3879 bp->ntp_fltr_bmap = NULL;
3880 }
3881 bp->ntp_fltr_count = 0;
3882#endif
3883}
3884
3885static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3886{
3887#ifdef CONFIG_RFS_ACCEL
3888 int i, rc = 0;
3889
3890 if (!(bp->flags & BNXT_FLAG_RFS))
3891 return 0;
3892
3893 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3894 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3895
3896 bp->ntp_fltr_count = 0;
ac45bd93
DC
3897 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3898 sizeof(long),
c0c050c5
MC
3899 GFP_KERNEL);
3900
3901 if (!bp->ntp_fltr_bmap)
3902 rc = -ENOMEM;
3903
3904 return rc;
3905#else
3906 return 0;
3907#endif
3908}
3909
3910static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3911{
3912 bnxt_free_vnic_attributes(bp);
3913 bnxt_free_tx_rings(bp);
3914 bnxt_free_rx_rings(bp);
3915 bnxt_free_cp_rings(bp);
3916 bnxt_free_ntp_fltrs(bp, irq_re_init);
3917 if (irq_re_init) {
fd3ab1c7 3918 bnxt_free_ring_stats(bp);
c0c050c5
MC
3919 bnxt_free_ring_grps(bp);
3920 bnxt_free_vnics(bp);
a960dec9
MC
3921 kfree(bp->tx_ring_map);
3922 bp->tx_ring_map = NULL;
b6ab4b01
MC
3923 kfree(bp->tx_ring);
3924 bp->tx_ring = NULL;
3925 kfree(bp->rx_ring);
3926 bp->rx_ring = NULL;
c0c050c5
MC
3927 kfree(bp->bnapi);
3928 bp->bnapi = NULL;
3929 } else {
3930 bnxt_clear_ring_indices(bp);
3931 }
3932}
3933
3934static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3935{
01657bcd 3936 int i, j, rc, size, arr_size;
c0c050c5
MC
3937 void *bnapi;
3938
3939 if (irq_re_init) {
3940 /* Allocate bnapi mem pointer array and mem block for
3941 * all queues
3942 */
3943 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3944 bp->cp_nr_rings);
3945 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3946 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3947 if (!bnapi)
3948 return -ENOMEM;
3949
3950 bp->bnapi = bnapi;
3951 bnapi += arr_size;
3952 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3953 bp->bnapi[i] = bnapi;
3954 bp->bnapi[i]->index = i;
3955 bp->bnapi[i]->bp = bp;
e38287b7
MC
3956 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3957 struct bnxt_cp_ring_info *cpr =
3958 &bp->bnapi[i]->cp_ring;
3959
3960 cpr->cp_ring_struct.ring_mem.flags =
3961 BNXT_RMEM_RING_PTE_FLAG;
3962 }
c0c050c5
MC
3963 }
3964
b6ab4b01
MC
3965 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3966 sizeof(struct bnxt_rx_ring_info),
3967 GFP_KERNEL);
3968 if (!bp->rx_ring)
3969 return -ENOMEM;
3970
3971 for (i = 0; i < bp->rx_nr_rings; i++) {
e38287b7
MC
3972 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3973
3974 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3975 rxr->rx_ring_struct.ring_mem.flags =
3976 BNXT_RMEM_RING_PTE_FLAG;
3977 rxr->rx_agg_ring_struct.ring_mem.flags =
3978 BNXT_RMEM_RING_PTE_FLAG;
3979 }
3980 rxr->bnapi = bp->bnapi[i];
b6ab4b01
MC
3981 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3982 }
3983
3984 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3985 sizeof(struct bnxt_tx_ring_info),
3986 GFP_KERNEL);
3987 if (!bp->tx_ring)
3988 return -ENOMEM;
3989
a960dec9
MC
3990 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3991 GFP_KERNEL);
3992
3993 if (!bp->tx_ring_map)
3994 return -ENOMEM;
3995
01657bcd
MC
3996 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3997 j = 0;
3998 else
3999 j = bp->rx_nr_rings;
4000
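/* Example (hypothetical ring counts): with 4 RX rings and 4 TX rings
 * and BNXT_FLAG_SHARED_RINGS clear, j starts at 4, so TX ring 0 is
 * attached to bp->bnapi[4] and gets its own completion ring instead of
 * sharing one with RX ring 0.
 */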
4001 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
e38287b7
MC
4002 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4003
4004 if (bp->flags & BNXT_FLAG_CHIP_P5)
4005 txr->tx_ring_struct.ring_mem.flags =
4006 BNXT_RMEM_RING_PTE_FLAG;
4007 txr->bnapi = bp->bnapi[j];
4008 bp->bnapi[j]->tx_ring = txr;
5f449249 4009 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
38413406 4010 if (i >= bp->tx_nr_rings_xdp) {
e38287b7 4011 txr->txq_index = i - bp->tx_nr_rings_xdp;
38413406
MC
4012 bp->bnapi[j]->tx_int = bnxt_tx_int;
4013 } else {
fa3e93e8 4014 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
38413406
MC
4015 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4016 }
b6ab4b01
MC
4017 }
4018
c0c050c5
MC
4019 rc = bnxt_alloc_stats(bp);
4020 if (rc)
4021 goto alloc_mem_err;
4022
4023 rc = bnxt_alloc_ntp_fltrs(bp);
4024 if (rc)
4025 goto alloc_mem_err;
4026
4027 rc = bnxt_alloc_vnics(bp);
4028 if (rc)
4029 goto alloc_mem_err;
4030 }
4031
4032 bnxt_init_ring_struct(bp);
4033
4034 rc = bnxt_alloc_rx_rings(bp);
4035 if (rc)
4036 goto alloc_mem_err;
4037
4038 rc = bnxt_alloc_tx_rings(bp);
4039 if (rc)
4040 goto alloc_mem_err;
4041
4042 rc = bnxt_alloc_cp_rings(bp);
4043 if (rc)
4044 goto alloc_mem_err;
4045
4046 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4047 BNXT_VNIC_UCAST_FLAG;
4048 rc = bnxt_alloc_vnic_attributes(bp);
4049 if (rc)
4050 goto alloc_mem_err;
4051 return 0;
4052
4053alloc_mem_err:
4054 bnxt_free_mem(bp, true);
4055 return rc;
4056}
4057
9d8bc097
MC
4058static void bnxt_disable_int(struct bnxt *bp)
4059{
4060 int i;
4061
4062 if (!bp->bnapi)
4063 return;
4064
4065 for (i = 0; i < bp->cp_nr_rings; i++) {
4066 struct bnxt_napi *bnapi = bp->bnapi[i];
4067 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
daf1f1e7 4068 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9d8bc097 4069
daf1f1e7 4070 if (ring->fw_ring_id != INVALID_HW_RING_ID)
697197e5 4071 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4072 }
4073}
4074
e5811b8c
MC
4075static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4076{
4077 struct bnxt_napi *bnapi = bp->bnapi[n];
4078 struct bnxt_cp_ring_info *cpr;
4079
4080 cpr = &bnapi->cp_ring;
4081 return cpr->cp_ring_struct.map_idx;
4082}
4083
9d8bc097
MC
4084static void bnxt_disable_int_sync(struct bnxt *bp)
4085{
4086 int i;
4087
4088 atomic_inc(&bp->intr_sem);
4089
4090 bnxt_disable_int(bp);
e5811b8c
MC
4091 for (i = 0; i < bp->cp_nr_rings; i++) {
4092 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4093
4094 synchronize_irq(bp->irq_tbl[map_idx].vector);
4095 }
9d8bc097
MC
4096}
4097
4098static void bnxt_enable_int(struct bnxt *bp)
4099{
4100 int i;
4101
4102 atomic_set(&bp->intr_sem, 0);
4103 for (i = 0; i < bp->cp_nr_rings; i++) {
4104 struct bnxt_napi *bnapi = bp->bnapi[i];
4105 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4106
697197e5 4107 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4108 }
4109}
4110
c0c050c5
MC
4111void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4112 u16 cmpl_ring, u16 target_id)
4113{
a8643e16 4114 struct input *req = request;
c0c050c5 4115
a8643e16
MC
4116 req->req_type = cpu_to_le16(req_type);
4117 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4118 req->target_id = cpu_to_le16(target_id);
760b6d33
VD
4119 if (bnxt_kong_hwrm_message(bp, req))
4120 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4121 else
4122 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
c0c050c5
MC
4123}
4124
d4f1420d
MC
4125static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4126{
4127 switch (hwrm_err) {
4128 case HWRM_ERR_CODE_SUCCESS:
4129 return 0;
4130 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4131 return -EACCES;
4132 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4133 return -ENOSPC;
4134 case HWRM_ERR_CODE_INVALID_PARAMS:
4135 case HWRM_ERR_CODE_INVALID_FLAGS:
4136 case HWRM_ERR_CODE_INVALID_ENABLES:
4137 case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4138 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4139 return -EINVAL;
4140 case HWRM_ERR_CODE_NO_BUFFER:
4141 return -ENOMEM;
4142 case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4143 return -EAGAIN;
4144 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4145 return -EOPNOTSUPP;
4146 default:
4147 return -EIO;
4148 }
4149}
4150
fbfbc485
MC
4151static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4152 int timeout, bool silent)
c0c050c5 4153{
a11fa2be 4154 int i, intr_process, rc, tmo_count;
a8643e16 4155 struct input *req = msg;
c0c050c5 4156 u32 *data = msg;
845adfe4
MC
4157 __le32 *resp_len;
4158 u8 *valid;
c0c050c5
MC
4159 u16 cp_ring_id, len = 0;
4160 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
e605db80 4161 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
ebd5818c 4162 struct hwrm_short_input short_input = {0};
2e9ee398 4163 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
89455017 4164 u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
2e9ee398 4165 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
760b6d33 4166 u16 dst = BNXT_HWRM_CHNL_CHIMP;
c0c050c5 4167
1dfddc41
MC
4168 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4169 if (msg_len > bp->hwrm_max_ext_req_len ||
4170 !bp->hwrm_short_cmd_req_addr)
4171 return -EINVAL;
4172 }
4173
760b6d33
VD
4174 if (bnxt_hwrm_kong_chnl(bp, req)) {
4175 dst = BNXT_HWRM_CHNL_KONG;
4176 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4177 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4178 resp = bp->hwrm_cmd_kong_resp_addr;
4179 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
4180 }
4181
4182 memset(resp, 0, PAGE_SIZE);
4183 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4184 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4185
4186 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4187 /* currently supports only one outstanding message */
4188 if (intr_process)
4189 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4190
1dfddc41
MC
4191 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4192 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
e605db80 4193 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
1dfddc41
MC
4194 u16 max_msg_len;
4195
4196 /* Set boundary for maximum extended request length for short
4197 * cmd format. If passed up from the device, use the max supported
4198 * internal req length.
4199 */
4200 max_msg_len = bp->hwrm_max_ext_req_len;
e605db80
DK
4201
4202 memcpy(short_cmd_req, req, msg_len);
1dfddc41
MC
4203 if (msg_len < max_msg_len)
4204 memset(short_cmd_req + msg_len, 0,
4205 max_msg_len - msg_len);
e605db80
DK
4206
4207 short_input.req_type = req->req_type;
4208 short_input.signature =
4209 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4210 short_input.size = cpu_to_le16(msg_len);
4211 short_input.req_addr =
4212 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4213
4214 data = (u32 *)&short_input;
4215 msg_len = sizeof(short_input);
4216
4217 /* Sync memory write before updating doorbell */
4218 wmb();
4219
4220 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4221 }
4222
c0c050c5 4223 /* Write request msg to hwrm channel */
2e9ee398 4224 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
c0c050c5 4225
e605db80 4226 for (i = msg_len; i < max_req_len; i += 4)
2e9ee398 4227 writel(0, bp->bar0 + bar_offset + i);
d79979a1 4228
c0c050c5 4229 /* Ring channel doorbell */
2e9ee398 4230 writel(1, bp->bar0 + doorbell_offset);
c0c050c5 4231
5bedb529
MC
4232 if (!pci_is_enabled(bp->pdev))
4233 return 0;
4234
ff4fe81d
MC
4235 if (!timeout)
4236 timeout = DFLT_HWRM_CMD_TIMEOUT;
9751e8e7
AG
4237 /* convert timeout to usec */
4238 timeout *= 1000;
ff4fe81d 4239
c0c050c5 4240 i = 0;
9751e8e7
AG
4241 /* Short timeout for the first few iterations:
4242 * number of loops = number of loops for short timeout +
4243 * number of loops for standard timeout.
4244 */
4245 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4246 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4247 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
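/* Worked example (constant values are illustrative): a 500 ms timeout
 * becomes 500000 usec; after 5 short-poll iterations of 3 usec each,
 * the remaining 499985 usec is covered by 25 usec polls, so
 * tmo_count = 5 + DIV_ROUND_UP(499985, 25) = 20005 loop iterations.
 */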
89455017
VD
4248 resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
4249
c0c050c5 4250 if (intr_process) {
fc718bb2
VD
4251 u16 seq_id = bp->hwrm_intr_seq_id;
4252
c0c050c5 4253 /* Wait until hwrm response cmpl interrupt is processed */
fc718bb2 4254 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
a11fa2be 4255 i++ < tmo_count) {
9751e8e7
AG
4256 /* on first few passes, just barely sleep */
4257 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4258 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4259 HWRM_SHORT_MAX_TIMEOUT);
4260 else
4261 usleep_range(HWRM_MIN_TIMEOUT,
4262 HWRM_MAX_TIMEOUT);
c0c050c5
MC
4263 }
4264
fc718bb2 4265 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
5bedb529
MC
4266 if (!silent)
4267 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4268 le16_to_cpu(req->req_type));
a935cb7e 4269 return -EBUSY;
c0c050c5 4270 }
845adfe4
MC
4271 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4272 HWRM_RESP_LEN_SFT;
89455017 4273 valid = resp_addr + len - 1;
c0c050c5 4274 } else {
cc559c1a
MC
4275 int j;
4276
c0c050c5 4277 /* Check if response len is updated */
a11fa2be 4278 for (i = 0; i < tmo_count; i++) {
c0c050c5
MC
4279 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4280 HWRM_RESP_LEN_SFT;
4281 if (len)
4282 break;
9751e8e7 4283 /* on first few passes, just barely sleep */
67681d02 4284 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
9751e8e7
AG
4285 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4286 HWRM_SHORT_MAX_TIMEOUT);
4287 else
4288 usleep_range(HWRM_MIN_TIMEOUT,
4289 HWRM_MAX_TIMEOUT);
c0c050c5
MC
4290 }
4291
a11fa2be 4292 if (i >= tmo_count) {
5bedb529
MC
4293 if (!silent)
4294 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4295 HWRM_TOTAL_TIMEOUT(i),
4296 le16_to_cpu(req->req_type),
4297 le16_to_cpu(req->seq_id), len);
a935cb7e 4298 return -EBUSY;
c0c050c5
MC
4299 }
4300
845adfe4 4301 /* Last byte of resp contains valid bit */
89455017 4302 valid = resp_addr + len - 1;
cc559c1a 4303 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
845adfe4
MC
4304 /* make sure we read from updated DMA memory */
4305 dma_rmb();
4306 if (*valid)
c0c050c5 4307 break;
0000b81a 4308 usleep_range(1, 5);
c0c050c5
MC
4309 }
4310
cc559c1a 4311 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
5bedb529
MC
4312 if (!silent)
4313 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4314 HWRM_TOTAL_TIMEOUT(i),
4315 le16_to_cpu(req->req_type),
4316 le16_to_cpu(req->seq_id), len,
4317 *valid);
a935cb7e 4318 return -EBUSY;
c0c050c5
MC
4319 }
4320 }
4321
845adfe4
MC
4322 /* Zero valid bit for compatibility. Valid bit in an older spec
4323 * may become a new field in a newer spec. We must make sure that
4324 * a new field not implemented by the old spec will read zero.
4325 */
4326 *valid = 0;
c0c050c5 4327 rc = le16_to_cpu(resp->error_code);
fbfbc485 4328 if (rc && !silent)
c0c050c5
MC
4329 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4330 le16_to_cpu(resp->req_type),
4331 le16_to_cpu(resp->seq_id), rc);
d4f1420d 4332 return bnxt_hwrm_to_stderr(rc);
fbfbc485
MC
4333}
4334
4335int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4336{
4337 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
c0c050c5
MC
4338}
4339
cc72f3b1
MC
4340int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4341 int timeout)
4342{
4343 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4344}
4345
c0c050c5
MC
4346int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4347{
4348 int rc;
4349
4350 mutex_lock(&bp->hwrm_cmd_lock);
4351 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4352 mutex_unlock(&bp->hwrm_cmd_lock);
4353 return rc;
4354}
4355
90e20921
MC
4356int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4357 int timeout)
4358{
4359 int rc;
4360
4361 mutex_lock(&bp->hwrm_cmd_lock);
4362 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4363 mutex_unlock(&bp->hwrm_cmd_lock);
4364 return rc;
4365}
4366
a1653b13
MC
4367int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
4368 int bmap_size)
c0c050c5
MC
4369{
4370 struct hwrm_func_drv_rgtr_input req = {0};
25be8623
MC
4371 DECLARE_BITMAP(async_events_bmap, 256);
4372 u32 *events = (u32 *)async_events_bmap;
a1653b13 4373 int i;
c0c050c5
MC
4374
4375 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4376
4377 req.enables =
a1653b13 4378 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
c0c050c5 4379
25be8623 4380 memset(async_events_bmap, 0, sizeof(async_events_bmap));
7e914027
MC
4381 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4382 u16 event_id = bnxt_async_events_arr[i];
25be8623 4383
7e914027
MC
4384 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4385 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4386 continue;
4387 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4388 }
a1653b13
MC
4389 if (bmap && bmap_size) {
4390 for (i = 0; i < bmap_size; i++) {
4391 if (test_bit(i, bmap))
4392 __set_bit(i, async_events_bmap);
4393 }
4394 }
4395
25be8623
MC
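/* The 256-bit async_events_bmap is forwarded to firmware as eight
 * 32-bit words; on a little-endian host, event id 33 for example lands
 * in bit 1 of async_event_fwd[1] (33 / 32 = word 1, 33 % 32 = bit 1).
 */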
4396 for (i = 0; i < 8; i++)
4397 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4398
a1653b13
MC
4399 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4400}
4401
4402static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
4403{
25e1acd6 4404 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
a1653b13 4405 struct hwrm_func_drv_rgtr_input req = {0};
25e1acd6 4406 int rc;
a1653b13
MC
4407
4408 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4409
4410 req.enables =
4411 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4412 FUNC_DRV_RGTR_REQ_ENABLES_VER);
4413
11f15ed3 4414 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
d4f52de0
MC
4415 req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
4416 req.ver_maj_8b = DRV_VER_MAJ;
4417 req.ver_min_8b = DRV_VER_MIN;
4418 req.ver_upd_8b = DRV_VER_UPD;
4419 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4420 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4421 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
c0c050c5
MC
4422
4423 if (BNXT_PF(bp)) {
9b0436c3 4424 u32 data[8];
a1653b13 4425 int i;
c0c050c5 4426
9b0436c3
MC
4427 memset(data, 0, sizeof(data));
4428 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4429 u16 cmd = bnxt_vf_req_snif[i];
4430 unsigned int bit, idx;
4431
4432 idx = cmd / 32;
4433 bit = cmd % 32;
4434 data[idx] |= 1 << bit;
4435 }
c0c050c5 4436
de68f5de
MC
4437 for (i = 0; i < 8; i++)
4438 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4439
c0c050c5
MC
4440 req.enables |=
4441 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4442 }
4443
abd43a13
VD
4444 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4445 req.flags |= cpu_to_le32(
4446 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4447
25e1acd6
MC
4448 mutex_lock(&bp->hwrm_cmd_lock);
4449 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
d4f1420d
MC
4450 if (!rc && (resp->flags &
4451 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)))
25e1acd6
MC
4452 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4453 mutex_unlock(&bp->hwrm_cmd_lock);
4454 return rc;
c0c050c5
MC
4455}
4456
be58a0da
JH
4457static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4458{
4459 struct hwrm_func_drv_unrgtr_input req = {0};
4460
4461 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4462 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4463}
4464
c0c050c5
MC
4465static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4466{
4467 u32 rc = 0;
4468 struct hwrm_tunnel_dst_port_free_input req = {0};
4469
4470 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4471 req.tunnel_type = tunnel_type;
4472
4473 switch (tunnel_type) {
4474 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4475 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4476 break;
4477 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4478 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4479 break;
4480 default:
4481 break;
4482 }
4483
4484 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4485 if (rc)
4486 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4487 rc);
4488 return rc;
4489}
4490
4491static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4492 u8 tunnel_type)
4493{
4494 u32 rc = 0;
4495 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4496 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4497
4498 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4499
4500 req.tunnel_type = tunnel_type;
4501 req.tunnel_dst_port_val = port;
4502
4503 mutex_lock(&bp->hwrm_cmd_lock);
4504 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4505 if (rc) {
4506 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4507 rc);
4508 goto err_out;
4509 }
4510
57aac71b
CJ
4511 switch (tunnel_type) {
4512 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
c0c050c5 4513 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
57aac71b
CJ
4514 break;
4515 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
c0c050c5 4516 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
57aac71b
CJ
4517 break;
4518 default:
4519 break;
4520 }
4521
c0c050c5
MC
4522err_out:
4523 mutex_unlock(&bp->hwrm_cmd_lock);
4524 return rc;
4525}
4526
4527static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4528{
4529 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4530 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4531
4532 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
c193554e 4533 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
c0c050c5
MC
4534
4535 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4536 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4537 req.mask = cpu_to_le32(vnic->rx_mask);
4538 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4539}
4540
4541#ifdef CONFIG_RFS_ACCEL
4542static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4543 struct bnxt_ntuple_filter *fltr)
4544{
4545 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4546
4547 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4548 req.ntuple_filter_id = fltr->filter_id;
4549 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4550}
4551
4552#define BNXT_NTP_FLTR_FLAGS \
4553 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4554 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4555 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4556 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4557 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4558 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4559 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4560 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4561 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4562 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4563 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4564 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4565 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
c193554e 4566 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
c0c050c5 4567
61aad724
MC
4568#define BNXT_NTP_TUNNEL_FLTR_FLAG \
4569 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4570
c0c050c5
MC
4571static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4572 struct bnxt_ntuple_filter *fltr)
4573{
c0c050c5 4574 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
5c209fc8 4575 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
c0c050c5 4576 struct flow_keys *keys = &fltr->fkeys;
ac33906c
MC
4577 struct bnxt_vnic_info *vnic;
4578 u32 dst_ena = 0;
5c209fc8 4579 int rc = 0;
c0c050c5
MC
4580
4581 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
a54c4d74 4582 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
c0c050c5 4583
ac33906c
MC
4584 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) {
4585 dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
4586 req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
4587 vnic = &bp->vnic_info[0];
4588 } else {
4589 vnic = &bp->vnic_info[fltr->rxq + 1];
4590 }
4591 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4592 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
c0c050c5
MC
4593
4594 req.ethertype = htons(ETH_P_IP);
4595 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
c193554e 4596 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
c0c050c5
MC
4597 req.ip_protocol = keys->basic.ip_proto;
4598
dda0e746
MC
4599 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4600 int i;
4601
4602 req.ethertype = htons(ETH_P_IPV6);
4603 req.ip_addr_type =
4604 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4605 *(struct in6_addr *)&req.src_ipaddr[0] =
4606 keys->addrs.v6addrs.src;
4607 *(struct in6_addr *)&req.dst_ipaddr[0] =
4608 keys->addrs.v6addrs.dst;
4609 for (i = 0; i < 4; i++) {
4610 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4611 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4612 }
4613 } else {
4614 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4615 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4616 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4617 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4618 }
61aad724
MC
4619 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4620 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4621 req.tunnel_type =
4622 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4623 }
c0c050c5
MC
4624
4625 req.src_port = keys->ports.src;
4626 req.src_port_mask = cpu_to_be16(0xffff);
4627 req.dst_port = keys->ports.dst;
4628 req.dst_port_mask = cpu_to_be16(0xffff);
4629
c0c050c5
MC
4630 mutex_lock(&bp->hwrm_cmd_lock);
4631 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5c209fc8
VD
4632 if (!rc) {
4633 resp = bnxt_get_hwrm_resp_addr(bp, &req);
c0c050c5 4634 fltr->filter_id = resp->ntuple_filter_id;
5c209fc8 4635 }
c0c050c5
MC
4636 mutex_unlock(&bp->hwrm_cmd_lock);
4637 return rc;
4638}
4639#endif
4640
4641static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4642 u8 *mac_addr)
4643{
4644 u32 rc = 0;
4645 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4646 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4647
4648 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
dc52c6c7
PS
4649 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4650 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4651 req.flags |=
4652 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
c193554e 4653 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
c0c050c5
MC
4654 req.enables =
4655 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
c193554e 4656 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
c0c050c5
MC
4657 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4658 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4659 req.l2_addr_mask[0] = 0xff;
4660 req.l2_addr_mask[1] = 0xff;
4661 req.l2_addr_mask[2] = 0xff;
4662 req.l2_addr_mask[3] = 0xff;
4663 req.l2_addr_mask[4] = 0xff;
4664 req.l2_addr_mask[5] = 0xff;
4665
4666 mutex_lock(&bp->hwrm_cmd_lock);
4667 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4668 if (!rc)
4669 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4670 resp->l2_filter_id;
4671 mutex_unlock(&bp->hwrm_cmd_lock);
4672 return rc;
4673}
4674
4675static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4676{
4677 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4678 int rc = 0;
4679
4680 /* Any associated ntuple filters will also be cleared by firmware. */
4681 mutex_lock(&bp->hwrm_cmd_lock);
4682 for (i = 0; i < num_of_vnics; i++) {
4683 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4684
4685 for (j = 0; j < vnic->uc_filter_count; j++) {
4686 struct hwrm_cfa_l2_filter_free_input req = {0};
4687
4688 bnxt_hwrm_cmd_hdr_init(bp, &req,
4689 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4690
4691 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4692
4693 rc = _hwrm_send_message(bp, &req, sizeof(req),
4694 HWRM_CMD_TIMEOUT);
4695 }
4696 vnic->uc_filter_count = 0;
4697 }
4698 mutex_unlock(&bp->hwrm_cmd_lock);
4699
4700 return rc;
4701}
4702
4703static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4704{
4705 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
79632e9b 4706 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
c0c050c5
MC
4707 struct hwrm_vnic_tpa_cfg_input req = {0};
4708
3c4fe80b
MC
4709 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4710 return 0;
4711
c0c050c5
MC
4712 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4713
4714 if (tpa_flags) {
4715 u16 mss = bp->dev->mtu - 40;
4716 u32 nsegs, n, segs = 0, flags;
4717
4718 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4719 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4720 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4721 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4722 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4723 if (tpa_flags & BNXT_FLAG_GRO)
4724 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4725
4726 req.flags = cpu_to_le32(flags);
4727
4728 req.enables =
4729 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
c193554e
MC
4730 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4731 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
c0c050c5
MC
4732
4733 /* The number of segs is in log2 units, and the first packet is not
4734 * included in these units.
4735 */
2839f28b
MC
4736 if (mss <= BNXT_RX_PAGE_SIZE) {
4737 n = BNXT_RX_PAGE_SIZE / mss;
c0c050c5
MC
4738 nsegs = (MAX_SKB_FRAGS - 1) * n;
4739 } else {
2839f28b
MC
4740 n = mss / BNXT_RX_PAGE_SIZE;
4741 if (mss & (BNXT_RX_PAGE_SIZE - 1))
c0c050c5
MC
4742 n++;
4743 nsegs = (MAX_SKB_FRAGS - n) / n;
4744 }
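/* Worked example (values are illustrative): with an MTU of 1500 the MSS
 * is 1460; assuming BNXT_RX_PAGE_SIZE is 4096 and MAX_SKB_FRAGS is 17,
 * the first branch applies, n = 4096 / 1460 = 2 and
 * nsegs = (17 - 1) * 2 = 32, which is programmed below as
 * ilog2(32) = 5 on chips that use log2 units.
 */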
4745
79632e9b
MC
4746 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4747 segs = MAX_TPA_SEGS_P5;
4748 max_aggs = bp->max_tpa;
4749 } else {
4750 segs = ilog2(nsegs);
4751 }
c0c050c5 4752 req.max_agg_segs = cpu_to_le16(segs);
79632e9b 4753 req.max_aggs = cpu_to_le16(max_aggs);
c193554e
MC
4754
4755 req.min_agg_len = cpu_to_le32(512);
c0c050c5
MC
4756 }
4757 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4758
4759 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4760}
4761
2c61d211
MC
4762static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4763{
4764 struct bnxt_ring_grp_info *grp_info;
4765
4766 grp_info = &bp->grp_info[ring->grp_idx];
4767 return grp_info->cp_fw_ring_id;
4768}
4769
4770static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4771{
4772 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4773 struct bnxt_napi *bnapi = rxr->bnapi;
4774 struct bnxt_cp_ring_info *cpr;
4775
4776 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4777 return cpr->cp_ring_struct.fw_ring_id;
4778 } else {
4779 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4780 }
4781}
4782
4783static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4784{
4785 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4786 struct bnxt_napi *bnapi = txr->bnapi;
4787 struct bnxt_cp_ring_info *cpr;
4788
4789 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4790 return cpr->cp_ring_struct.fw_ring_id;
4791 } else {
4792 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4793 }
4794}
4795
c0c050c5
MC
4796static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4797{
4798 u32 i, j, max_rings;
4799 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4800 struct hwrm_vnic_rss_cfg_input req = {0};
4801
7b3af4f7
MC
4802 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4803 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
c0c050c5
MC
4804 return 0;
4805
4806 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4807 if (set_rss) {
87da7f79 4808 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
50f011b6 4809 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
dc52c6c7
PS
4810 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4811 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4812 max_rings = bp->rx_nr_rings - 1;
4813 else
4814 max_rings = bp->rx_nr_rings;
4815 } else {
c0c050c5 4816 max_rings = 1;
dc52c6c7 4817 }
c0c050c5
MC
4818
4819 /* Fill the RSS indirection table with ring group ids */
4820 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4821 if (j == max_rings)
4822 j = 0;
4823 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4824 }
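/* Example (ring count is illustrative): with max_rings = 4 the
 * HW_HASH_INDEX_SIZE-entry table becomes grp[0], grp[1], grp[2],
 * grp[3], grp[0], ... so RSS hash buckets are spread round-robin
 * across the RX rings.
 */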
4825
4826 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4827 req.hash_key_tbl_addr =
4828 cpu_to_le64(vnic->rss_hash_key_dma_addr);
4829 }
94ce9caa 4830 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
c0c050c5
MC
4831 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4832}
4833
7b3af4f7
MC
4834static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4835{
4836 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4837 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4838 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4839 struct hwrm_vnic_rss_cfg_input req = {0};
4840
4841 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4842 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4843 if (!set_rss) {
4844 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4845 return 0;
4846 }
4847 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4848 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4849 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4850 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4851 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4852 for (i = 0, k = 0; i < nr_ctxs; i++) {
4853 __le16 *ring_tbl = vnic->rss_table;
4854 int rc;
4855
4856 req.ring_table_pair_index = i;
4857 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4858 for (j = 0; j < 64; j++) {
4859 u16 ring_id;
4860
4861 ring_id = rxr->rx_ring_struct.fw_ring_id;
4862 *ring_tbl++ = cpu_to_le16(ring_id);
4863 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4864 *ring_tbl++ = cpu_to_le16(ring_id);
4865 rxr++;
4866 k++;
4867 if (k == max_rings) {
4868 k = 0;
4869 rxr = &bp->rx_ring[0];
4870 }
4871 }
4872 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4873 if (rc)
d4f1420d 4874 return rc;
7b3af4f7
MC
4875 }
4876 return 0;
4877}
4878
c0c050c5
MC
4879static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4880{
4881 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4882 struct hwrm_vnic_plcmodes_cfg_input req = {0};
4883
4884 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4885 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4886 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4887 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4888 req.enables =
4889 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4890 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4891 /* thresholds not implemented in firmware yet */
4892 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4893 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4894 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4895 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4896}
4897
94ce9caa
PS
4898static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4899 u16 ctx_idx)
c0c050c5
MC
4900{
4901 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4902
4903 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4904 req.rss_cos_lb_ctx_id =
94ce9caa 4905 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
c0c050c5
MC
4906
4907 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
94ce9caa 4908 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
c0c050c5
MC
4909}
4910
4911static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4912{
94ce9caa 4913 int i, j;
c0c050c5
MC
4914
4915 for (i = 0; i < bp->nr_vnics; i++) {
4916 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4917
94ce9caa
PS
4918 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4919 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4920 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4921 }
c0c050c5
MC
4922 }
4923 bp->rsscos_nr_ctxs = 0;
4924}
4925
94ce9caa 4926static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
c0c050c5
MC
4927{
4928 int rc;
4929 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4930 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4931 bp->hwrm_cmd_resp_addr;
4932
4933 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4934 -1);
4935
4936 mutex_lock(&bp->hwrm_cmd_lock);
4937 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4938 if (!rc)
94ce9caa 4939 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
c0c050c5
MC
4940 le16_to_cpu(resp->rss_cos_lb_ctx_id);
4941 mutex_unlock(&bp->hwrm_cmd_lock);
4942
4943 return rc;
4944}
4945
abe93ad2
MC
4946static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4947{
4948 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4949 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4950 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4951}
4952
a588e458 4953int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
c0c050c5 4954{
b81a90d3 4955 unsigned int ring = 0, grp_idx;
c0c050c5
MC
4956 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4957 struct hwrm_vnic_cfg_input req = {0};
cf6645f8 4958 u16 def_vlan = 0;
c0c050c5
MC
4959
4960 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
dc52c6c7 4961
7b3af4f7
MC
4962 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4963 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4964
4965 req.default_rx_ring_id =
4966 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
4967 req.default_cmpl_ring_id =
4968 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
4969 req.enables =
4970 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
4971 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
4972 goto vnic_mru;
4973 }
dc52c6c7 4974 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
c0c050c5 4975 /* Only RSS support for now TBD: COS & LB */
dc52c6c7
PS
4976 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
4977 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4978 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4979 VNIC_CFG_REQ_ENABLES_MRU);
ae10ae74
MC
4980 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
4981 req.rss_rule =
4982 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
4983 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4984 VNIC_CFG_REQ_ENABLES_MRU);
4985 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
dc52c6c7
PS
4986 } else {
4987 req.rss_rule = cpu_to_le16(0xffff);
4988 }
94ce9caa 4989
dc52c6c7
PS
4990 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
4991 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
94ce9caa
PS
4992 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
4993 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
4994 } else {
4995 req.cos_rule = cpu_to_le16(0xffff);
4996 }
4997
c0c050c5 4998 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
b81a90d3 4999 ring = 0;
c0c050c5 5000 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
b81a90d3 5001 ring = vnic_id - 1;
76595193
PS
5002 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5003 ring = bp->rx_nr_rings - 1;
c0c050c5 5004
b81a90d3 5005 grp_idx = bp->rx_ring[ring].bnapi->index;
c0c050c5 5006 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
c0c050c5 5007 req.lb_rule = cpu_to_le16(0xffff);
7b3af4f7 5008vnic_mru:
c0c050c5
MC
5009 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
5010 VLAN_HLEN);
5011
7b3af4f7 5012 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
cf6645f8
MC
5013#ifdef CONFIG_BNXT_SRIOV
5014 if (BNXT_VF(bp))
5015 def_vlan = bp->vf.vlan;
5016#endif
5017 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
c0c050c5 5018 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
a588e458 5019 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
abe93ad2 5020 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
c0c050c5
MC
5021
5022 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5023}
5024
5025static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5026{
5027 u32 rc = 0;
5028
5029 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5030 struct hwrm_vnic_free_input req = {0};
5031
5032 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5033 req.vnic_id =
5034 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5035
5036 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5037 if (rc)
5038 return rc;
5039 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5040 }
5041 return rc;
5042}
5043
5044static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5045{
5046 u16 i;
5047
5048 for (i = 0; i < bp->nr_vnics; i++)
5049 bnxt_hwrm_vnic_free_one(bp, i);
5050}
5051
b81a90d3
MC
5052static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5053 unsigned int start_rx_ring_idx,
5054 unsigned int nr_rings)
c0c050c5 5055{
b81a90d3
MC
5056 int rc = 0;
5057 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
c0c050c5
MC
5058 struct hwrm_vnic_alloc_input req = {0};
5059 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
44c6f72a
MC
5060 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5061
5062 if (bp->flags & BNXT_FLAG_CHIP_P5)
5063 goto vnic_no_ring_grps;
c0c050c5
MC
5064
5065 /* map ring groups to this vnic */
b81a90d3
MC
5066 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5067 grp_idx = bp->rx_ring[i].bnapi->index;
5068 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
c0c050c5 5069 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
b81a90d3 5070 j, nr_rings);
c0c050c5
MC
5071 break;
5072 }
44c6f72a 5073 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
c0c050c5
MC
5074 }
5075
44c6f72a
MC
5076vnic_no_ring_grps:
5077 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5078 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
c0c050c5
MC
5079 if (vnic_id == 0)
5080 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5081
5082 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5083
5084 mutex_lock(&bp->hwrm_cmd_lock);
5085 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5086 if (!rc)
44c6f72a 5087 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
c0c050c5
MC
5088 mutex_unlock(&bp->hwrm_cmd_lock);
5089 return rc;
5090}
5091
8fdefd63
MC
5092static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5093{
5094 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5095 struct hwrm_vnic_qcaps_input req = {0};
5096 int rc;
5097
fbbdbc64 5098 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
ba642ab7 5099 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
8fdefd63
MC
5100 if (bp->hwrm_spec_code < 0x10600)
5101 return 0;
5102
5103 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5104 mutex_lock(&bp->hwrm_cmd_lock);
5105 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5106 if (!rc) {
abe93ad2
MC
5107 u32 flags = le32_to_cpu(resp->flags);
5108
41e8d798
MC
5109 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5110 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
8fdefd63 5111 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
abe93ad2
MC
5112 if (flags &
5113 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5114 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
79632e9b 5115 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
4e748506
MC
5116 if (bp->max_tpa_v2)
5117 bp->hw_ring_stats_size =
5118 sizeof(struct ctx_hw_stats_ext);
8fdefd63
MC
5119 }
5120 mutex_unlock(&bp->hwrm_cmd_lock);
5121 return rc;
5122}
5123
c0c050c5
MC
5124static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5125{
5126 u16 i;
5127 u32 rc = 0;
5128
44c6f72a
MC
5129 if (bp->flags & BNXT_FLAG_CHIP_P5)
5130 return 0;
5131
c0c050c5
MC
5132 mutex_lock(&bp->hwrm_cmd_lock);
5133 for (i = 0; i < bp->rx_nr_rings; i++) {
5134 struct hwrm_ring_grp_alloc_input req = {0};
5135 struct hwrm_ring_grp_alloc_output *resp =
5136 bp->hwrm_cmd_resp_addr;
b81a90d3 5137 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
c0c050c5
MC
5138
5139 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5140
b81a90d3
MC
5141 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5142 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5143 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5144 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
c0c050c5
MC
5145
5146 rc = _hwrm_send_message(bp, &req, sizeof(req),
5147 HWRM_CMD_TIMEOUT);
5148 if (rc)
5149 break;
5150
b81a90d3
MC
5151 bp->grp_info[grp_idx].fw_grp_id =
5152 le32_to_cpu(resp->ring_group_id);
c0c050c5
MC
5153 }
5154 mutex_unlock(&bp->hwrm_cmd_lock);
5155 return rc;
5156}
5157
5158static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5159{
5160 u16 i;
5161 u32 rc = 0;
5162 struct hwrm_ring_grp_free_input req = {0};
5163
44c6f72a 5164 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
5165 return 0;
5166
5167 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5168
5169 mutex_lock(&bp->hwrm_cmd_lock);
5170 for (i = 0; i < bp->cp_nr_rings; i++) {
5171 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5172 continue;
5173 req.ring_group_id =
5174 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5175
5176 rc = _hwrm_send_message(bp, &req, sizeof(req),
5177 HWRM_CMD_TIMEOUT);
5178 if (rc)
5179 break;
5180 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5181 }
5182 mutex_unlock(&bp->hwrm_cmd_lock);
5183 return rc;
5184}
5185
5186static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5187 struct bnxt_ring_struct *ring,
9899bb59 5188 u32 ring_type, u32 map_index)
c0c050c5
MC
5189{
5190 int rc = 0, err = 0;
5191 struct hwrm_ring_alloc_input req = {0};
5192 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6fe19886 5193 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
9899bb59 5194 struct bnxt_ring_grp_info *grp_info;
c0c050c5
MC
5195 u16 ring_id;
5196
5197 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5198
5199 req.enables = 0;
6fe19886
MC
5200 if (rmem->nr_pages > 1) {
5201 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
c0c050c5
MC
5202 /* Page size is in log2 units */
5203 req.page_size = BNXT_PAGE_SHIFT;
5204 req.page_tbl_depth = 1;
5205 } else {
6fe19886 5206 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
c0c050c5
MC
5207 }
5208 req.fbo = 0;
5209 /* Association of ring index with doorbell index and MSIX number */
5210 req.logical_id = cpu_to_le16(map_index);
5211
5212 switch (ring_type) {
2c61d211
MC
5213 case HWRM_RING_ALLOC_TX: {
5214 struct bnxt_tx_ring_info *txr;
5215
5216 txr = container_of(ring, struct bnxt_tx_ring_info,
5217 tx_ring_struct);
c0c050c5
MC
5218 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5219 /* Association of transmit ring with completion ring */
9899bb59 5220 grp_info = &bp->grp_info[ring->grp_idx];
2c61d211 5221 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
c0c050c5 5222 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
9899bb59 5223 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
c0c050c5
MC
5224 req.queue_id = cpu_to_le16(ring->queue_id);
5225 break;
2c61d211 5226 }
c0c050c5
MC
5227 case HWRM_RING_ALLOC_RX:
5228 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5229 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
23aefdd7
MC
5230 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5231 u16 flags = 0;
5232
5233 /* Association of rx ring with stats context */
5234 grp_info = &bp->grp_info[ring->grp_idx];
5235 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5236 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5237 req.enables |= cpu_to_le32(
5238 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5239 if (NET_IP_ALIGN == 2)
5240 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5241 req.flags = cpu_to_le16(flags);
5242 }
c0c050c5
MC
5243 break;
5244 case HWRM_RING_ALLOC_AGG:
23aefdd7
MC
5245 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5246 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5247 /* Association of agg ring with rx ring */
5248 grp_info = &bp->grp_info[ring->grp_idx];
5249 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5250 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5251 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5252 req.enables |= cpu_to_le32(
5253 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5254 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5255 } else {
5256 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5257 }
c0c050c5
MC
5258 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5259 break;
5260 case HWRM_RING_ALLOC_CMPL:
bac9a7e0 5261 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
c0c050c5 5262 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
23aefdd7
MC
5263 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5264 /* Association of cp ring with nq */
5265 grp_info = &bp->grp_info[map_index];
5266 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5267 req.cq_handle = cpu_to_le64(ring->handle);
5268 req.enables |= cpu_to_le32(
5269 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5270 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5271 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5272 }
5273 break;
5274 case HWRM_RING_ALLOC_NQ:
5275 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5276 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
c0c050c5
MC
5277 if (bp->flags & BNXT_FLAG_USING_MSIX)
5278 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5279 break;
5280 default:
5281 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5282 ring_type);
5283 return -1;
5284 }
5285
5286 mutex_lock(&bp->hwrm_cmd_lock);
5287 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5288 err = le16_to_cpu(resp->error_code);
5289 ring_id = le16_to_cpu(resp->ring_id);
5290 mutex_unlock(&bp->hwrm_cmd_lock);
5291
5292 if (rc || err) {
2727c888
MC
5293 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5294 ring_type, rc, err);
5295 return -EIO;
c0c050c5
MC
5296 }
5297 ring->fw_ring_id = ring_id;
5298 return rc;
5299}
5300
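/* Point firmware async event notifications at the given completion
 * ring.  The PF uses HWRM_FUNC_CFG; a VF uses HWRM_FUNC_VF_CFG.
 */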
486b5c22
MC
5301static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5302{
5303 int rc;
5304
5305 if (BNXT_PF(bp)) {
5306 struct hwrm_func_cfg_input req = {0};
5307
5308 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5309 req.fid = cpu_to_le16(0xffff);
5310 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5311 req.async_event_cr = cpu_to_le16(idx);
5312 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5313 } else {
5314 struct hwrm_func_vf_cfg_input req = {0};
5315
5316 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5317 req.enables =
5318 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5319 req.async_event_cr = cpu_to_le16(idx);
5320 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5321 }
5322 return rc;
5323}
5324
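/* Record the doorbell address and key for a newly allocated ring.
 * P5 chips use 64-bit doorbells at a fixed BAR1 offset (0x10000 for
 * the PF, 0x4000 for a VF) keyed by ring type and xid; older chips
 * use 32-bit doorbells at map_idx * 0x80.
 */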
697197e5
MC
5325static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5326 u32 map_idx, u32 xid)
5327{
5328 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5329 if (BNXT_PF(bp))
5330 db->doorbell = bp->bar1 + 0x10000;
5331 else
5332 db->doorbell = bp->bar1 + 0x4000;
5333 switch (ring_type) {
5334 case HWRM_RING_ALLOC_TX:
5335 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5336 break;
5337 case HWRM_RING_ALLOC_RX:
5338 case HWRM_RING_ALLOC_AGG:
5339 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5340 break;
5341 case HWRM_RING_ALLOC_CMPL:
5342 db->db_key64 = DBR_PATH_L2;
5343 break;
5344 case HWRM_RING_ALLOC_NQ:
5345 db->db_key64 = DBR_PATH_L2;
5346 break;
5347 }
5348 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5349 } else {
5350 db->doorbell = bp->bar1 + map_idx * 0x80;
5351 switch (ring_type) {
5352 case HWRM_RING_ALLOC_TX:
5353 db->db_key32 = DB_KEY_TX;
5354 break;
5355 case HWRM_RING_ALLOC_RX:
5356 case HWRM_RING_ALLOC_AGG:
5357 db->db_key32 = DB_KEY_RX;
5358 break;
5359 case HWRM_RING_ALLOC_CMPL:
5360 db->db_key32 = DB_KEY_CP;
5361 break;
5362 }
5363 }
5364}
5365
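/* Allocate all rings with firmware: NQs (P5) or completion rings
 * first, then TX, RX and aggregation rings.  On P5 chips each TX and
 * RX ring is also given its own child completion ring, bound to the
 * NQ via nq_ring_id/cq_handle.
 */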
c0c050c5
MC
5366static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5367{
e8f267b0 5368 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
c0c050c5 5369 int i, rc = 0;
697197e5 5370 u32 type;
c0c050c5 5371
23aefdd7
MC
5372 if (bp->flags & BNXT_FLAG_CHIP_P5)
5373 type = HWRM_RING_ALLOC_NQ;
5374 else
5375 type = HWRM_RING_ALLOC_CMPL;
edd0c2cc
MC
5376 for (i = 0; i < bp->cp_nr_rings; i++) {
5377 struct bnxt_napi *bnapi = bp->bnapi[i];
5378 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5379 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9899bb59 5380 u32 map_idx = ring->map_idx;
5e66e35a 5381 unsigned int vector;
c0c050c5 5382
5e66e35a
MC
5383 vector = bp->irq_tbl[map_idx].vector;
5384 disable_irq_nosync(vector);
697197e5 5385 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5e66e35a
MC
5386 if (rc) {
5387 enable_irq(vector);
edd0c2cc 5388 goto err_out;
5e66e35a 5389 }
697197e5
MC
5390 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5391 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5e66e35a 5392 enable_irq(vector);
edd0c2cc 5393 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
486b5c22
MC
5394
5395 if (!i) {
5396 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5397 if (rc)
5398 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5399 }
c0c050c5
MC
5400 }
5401
697197e5 5402 type = HWRM_RING_ALLOC_TX;
edd0c2cc 5403 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5404 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3e08b184
MC
5405 struct bnxt_ring_struct *ring;
5406 u32 map_idx;
c0c050c5 5407
3e08b184
MC
5408 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5409 struct bnxt_napi *bnapi = txr->bnapi;
5410 struct bnxt_cp_ring_info *cpr, *cpr2;
5411 u32 type2 = HWRM_RING_ALLOC_CMPL;
5412
5413 cpr = &bnapi->cp_ring;
5414 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5415 ring = &cpr2->cp_ring_struct;
5416 ring->handle = BNXT_TX_HDL;
5417 map_idx = bnapi->index;
5418 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5419 if (rc)
5420 goto err_out;
5421 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5422 ring->fw_ring_id);
5423 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5424 }
5425 ring = &txr->tx_ring_struct;
5426 map_idx = i;
697197e5 5427 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5428 if (rc)
5429 goto err_out;
697197e5 5430 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
c0c050c5
MC
5431 }
5432
697197e5 5433 type = HWRM_RING_ALLOC_RX;
edd0c2cc 5434 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5435 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5436 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3e08b184
MC
5437 struct bnxt_napi *bnapi = rxr->bnapi;
5438 u32 map_idx = bnapi->index;
c0c050c5 5439
697197e5 5440 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5441 if (rc)
5442 goto err_out;
697197e5 5443 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
e8f267b0
MC
5445		/* If we have agg rings, agg buffers are posted first, so
		 * defer the rx doorbell write until the agg ring is set up.
		 */
5445 if (!agg_rings)
5446 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5447 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3e08b184
MC
5448 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5449 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5450 u32 type2 = HWRM_RING_ALLOC_CMPL;
5451 struct bnxt_cp_ring_info *cpr2;
5452
5453 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5454 ring = &cpr2->cp_ring_struct;
5455 ring->handle = BNXT_RX_HDL;
5456 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5457 if (rc)
5458 goto err_out;
5459 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5460 ring->fw_ring_id);
5461 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5462 }
c0c050c5
MC
5463 }
5464
e8f267b0 5465 if (agg_rings) {
697197e5 5466 type = HWRM_RING_ALLOC_AGG;
c0c050c5 5467 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5468 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
5469 struct bnxt_ring_struct *ring =
5470 &rxr->rx_agg_ring_struct;
9899bb59 5471 u32 grp_idx = ring->grp_idx;
b81a90d3 5472 u32 map_idx = grp_idx + bp->rx_nr_rings;
c0c050c5 5473
697197e5 5474 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
c0c050c5
MC
5475 if (rc)
5476 goto err_out;
5477
697197e5
MC
5478 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5479 ring->fw_ring_id);
5480 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
e8f267b0 5481 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5482 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
5483 }
5484 }
5485err_out:
5486 return rc;
5487}
5488
5489static int hwrm_ring_free_send_msg(struct bnxt *bp,
5490 struct bnxt_ring_struct *ring,
5491 u32 ring_type, int cmpl_ring_id)
5492{
5493 int rc;
5494 struct hwrm_ring_free_input req = {0};
5495 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5496 u16 error_code;
5497
74608fc9 5498 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
c0c050c5
MC
5499 req.ring_type = ring_type;
5500 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5501
5502 mutex_lock(&bp->hwrm_cmd_lock);
5503 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5504 error_code = le16_to_cpu(resp->error_code);
5505 mutex_unlock(&bp->hwrm_cmd_lock);
5506
5507 if (rc || error_code) {
2727c888
MC
5508 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5509 ring_type, rc, error_code);
5510 return -EIO;
c0c050c5
MC
5511 }
5512 return 0;
5513}
5514
edd0c2cc 5515static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 5516{
23aefdd7 5517 u32 type;
edd0c2cc 5518 int i;
c0c050c5
MC
5519
5520 if (!bp->bnapi)
edd0c2cc 5521 return;
c0c050c5 5522
edd0c2cc 5523 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5524 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 5525 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
edd0c2cc
MC
5526
5527 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5528 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5529
edd0c2cc
MC
5530 hwrm_ring_free_send_msg(bp, ring,
5531 RING_FREE_REQ_RING_TYPE_TX,
5532 close_path ? cmpl_ring_id :
5533 INVALID_HW_RING_ID);
5534 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5535 }
5536 }
5537
edd0c2cc 5538 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5539 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5540 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3 5541 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5542
5543 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5544 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5545
edd0c2cc
MC
5546 hwrm_ring_free_send_msg(bp, ring,
5547 RING_FREE_REQ_RING_TYPE_RX,
5548 close_path ? cmpl_ring_id :
5549 INVALID_HW_RING_ID);
5550 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5551 bp->grp_info[grp_idx].rx_fw_ring_id =
5552 INVALID_HW_RING_ID;
c0c050c5
MC
5553 }
5554 }
5555
23aefdd7
MC
5556 if (bp->flags & BNXT_FLAG_CHIP_P5)
5557 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5558 else
5559 type = RING_FREE_REQ_RING_TYPE_RX;
edd0c2cc 5560 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5561 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5562 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
b81a90d3 5563 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5564
5565 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5566 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5567
23aefdd7 5568 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5569 close_path ? cmpl_ring_id :
5570 INVALID_HW_RING_ID);
5571 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5572 bp->grp_info[grp_idx].agg_fw_ring_id =
5573 INVALID_HW_RING_ID;
c0c050c5
MC
5574 }
5575 }
5576
9d8bc097
MC
5577 /* The completion rings are about to be freed. After that the
5578 * IRQ doorbell will not work anymore. So we need to disable
5579 * IRQ here.
5580 */
5581 bnxt_disable_int_sync(bp);
5582
23aefdd7
MC
5583 if (bp->flags & BNXT_FLAG_CHIP_P5)
5584 type = RING_FREE_REQ_RING_TYPE_NQ;
5585 else
5586 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
edd0c2cc
MC
5587 for (i = 0; i < bp->cp_nr_rings; i++) {
5588 struct bnxt_napi *bnapi = bp->bnapi[i];
5589 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3e08b184
MC
5590 struct bnxt_ring_struct *ring;
5591 int j;
edd0c2cc 5592
3e08b184
MC
5593 for (j = 0; j < 2; j++) {
5594 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5595
5596 if (cpr2) {
5597 ring = &cpr2->cp_ring_struct;
5598 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5599 continue;
5600 hwrm_ring_free_send_msg(bp, ring,
5601 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5602 INVALID_HW_RING_ID);
5603 ring->fw_ring_id = INVALID_HW_RING_ID;
5604 }
5605 }
5606 ring = &cpr->cp_ring_struct;
edd0c2cc 5607 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
23aefdd7 5608 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5609 INVALID_HW_RING_ID);
5610 ring->fw_ring_id = INVALID_HW_RING_ID;
5611 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5612 }
5613 }
c0c050c5
MC
5614}
5615
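/* Read back the resources firmware has actually reserved for this
 * function (HWRM_FUNC_QCFG) and cache them in bp->hw_resc.  On P5
 * chips the reserved RX/TX counts are trimmed so that they fit within
 * the reserved completion rings.
 */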
41e8d798
MC
5616static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5617 bool shared);
5618
674f50a5
MC
5619static int bnxt_hwrm_get_rings(struct bnxt *bp)
5620{
5621 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5622 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5623 struct hwrm_func_qcfg_input req = {0};
5624 int rc;
5625
5626 if (bp->hwrm_spec_code < 0x10601)
5627 return 0;
5628
5629 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5630 req.fid = cpu_to_le16(0xffff);
5631 mutex_lock(&bp->hwrm_cmd_lock);
5632 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5633 if (rc) {
5634 mutex_unlock(&bp->hwrm_cmd_lock);
d4f1420d 5635 return rc;
674f50a5
MC
5636 }
5637
5638 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
f1ca94de 5639 if (BNXT_NEW_RM(bp)) {
674f50a5
MC
5640 u16 cp, stats;
5641
5642 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5643 hw_resc->resv_hw_ring_grps =
5644 le32_to_cpu(resp->alloc_hw_ring_grps);
5645 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5646 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5647 stats = le16_to_cpu(resp->alloc_stat_ctx);
75720e63 5648 hw_resc->resv_irqs = cp;
41e8d798
MC
5649 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5650 int rx = hw_resc->resv_rx_rings;
5651 int tx = hw_resc->resv_tx_rings;
5652
5653 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5654 rx >>= 1;
5655 if (cp < (rx + tx)) {
5656 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5657 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5658 rx <<= 1;
5659 hw_resc->resv_rx_rings = rx;
5660 hw_resc->resv_tx_rings = tx;
5661 }
75720e63 5662 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
41e8d798
MC
5663 hw_resc->resv_hw_ring_grps = rx;
5664 }
674f50a5 5665 hw_resc->resv_cp_rings = cp;
780baad4 5666 hw_resc->resv_stat_ctxs = stats;
674f50a5
MC
5667 }
5668 mutex_unlock(&bp->hwrm_cmd_lock);
5669 return 0;
5670}
5671
391be5c2
MC
5672/* Caller must hold bp->hwrm_cmd_lock */
5673int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5674{
5675 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5676 struct hwrm_func_qcfg_input req = {0};
5677 int rc;
5678
5679 if (bp->hwrm_spec_code < 0x10601)
5680 return 0;
5681
5682 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5683 req.fid = cpu_to_le16(fid);
5684 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5685 if (!rc)
5686 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5687
5688 return rc;
5689}
5690
41e8d798
MC
5691static bool bnxt_rfs_supported(struct bnxt *bp);
5692
4ed50ef4
MC
5693static void
5694__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5695 int tx_rings, int rx_rings, int ring_grps,
780baad4 5696 int cp_rings, int stats, int vnics)
391be5c2 5697{
674f50a5 5698 u32 enables = 0;
391be5c2 5699
4ed50ef4
MC
5700 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5701 req->fid = cpu_to_le16(0xffff);
674f50a5 5702 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4ed50ef4 5703 req->num_tx_rings = cpu_to_le16(tx_rings);
f1ca94de 5704 if (BNXT_NEW_RM(bp)) {
674f50a5 5705 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
3f93cd3f 5706 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
5707 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5708 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5709 enables |= tx_rings + ring_grps ?
3f93cd3f 5710 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
5711 enables |= rx_rings ?
5712 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5713 } else {
5714 enables |= cp_rings ?
3f93cd3f 5715 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
5716 enables |= ring_grps ?
5717 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5718 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5719 }
dbe80d44 5720 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
674f50a5 5721
4ed50ef4 5722 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
5723 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5724 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5725 req->num_msix = cpu_to_le16(cp_rings);
5726 req->num_rsscos_ctxs =
5727 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5728 } else {
5729 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5730 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5731 req->num_rsscos_ctxs = cpu_to_le16(1);
5732 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5733 bnxt_rfs_supported(bp))
5734 req->num_rsscos_ctxs =
5735 cpu_to_le16(ring_grps + 1);
5736 }
780baad4 5737 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4 5738 req->num_vnics = cpu_to_le16(vnics);
674f50a5 5739 }
4ed50ef4
MC
5740 req->enables = cpu_to_le32(enables);
5741}
5742
5743static void
5744__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5745 struct hwrm_func_vf_cfg_input *req, int tx_rings,
5746 int rx_rings, int ring_grps, int cp_rings,
780baad4 5747 int stats, int vnics)
4ed50ef4
MC
5748{
5749 u32 enables = 0;
5750
5751 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5752 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
41e8d798
MC
5753 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5754 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
3f93cd3f 5755 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
5756 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5757 enables |= tx_rings + ring_grps ?
3f93cd3f 5758 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
5759 } else {
5760 enables |= cp_rings ?
3f93cd3f 5761 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
5762 enables |= ring_grps ?
5763 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5764 }
4ed50ef4 5765 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
41e8d798 5766 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
4ed50ef4 5767
41e8d798 5768 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
4ed50ef4
MC
5769 req->num_tx_rings = cpu_to_le16(tx_rings);
5770 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
5771 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5772 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5773 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5774 } else {
5775 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5776 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5777 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5778 }
780baad4 5779 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4
MC
5780 req->num_vnics = cpu_to_le16(vnics);
5781
5782 req->enables = cpu_to_le32(enables);
5783}
5784
5785static int
5786bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 5787 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4
MC
5788{
5789 struct hwrm_func_cfg_input req = {0};
5790 int rc;
5791
5792 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5793 cp_rings, stats, vnics);
4ed50ef4 5794 if (!req.enables)
391be5c2
MC
5795 return 0;
5796
674f50a5
MC
5797 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5798 if (rc)
d4f1420d 5799 return rc;
674f50a5
MC
5800
5801 if (bp->hwrm_spec_code < 0x10601)
5802 bp->hw_resc.resv_tx_rings = tx_rings;
5803
5804 rc = bnxt_hwrm_get_rings(bp);
5805 return rc;
5806}
5807
5808static int
5809bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 5810 int ring_grps, int cp_rings, int stats, int vnics)
674f50a5
MC
5811{
5812 struct hwrm_func_vf_cfg_input req = {0};
674f50a5
MC
5813 int rc;
5814
f1ca94de 5815 if (!BNXT_NEW_RM(bp)) {
674f50a5 5816 bp->hw_resc.resv_tx_rings = tx_rings;
391be5c2 5817 return 0;
674f50a5 5818 }
391be5c2 5819
4ed50ef4 5820 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5821 cp_rings, stats, vnics);
391be5c2 5822 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
674f50a5 5823 if (rc)
d4f1420d 5824 return rc;
674f50a5
MC
5825
5826 rc = bnxt_hwrm_get_rings(bp);
5827 return rc;
5828}
5829
5830static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
780baad4 5831 int cp, int stat, int vnic)
674f50a5
MC
5832{
5833 if (BNXT_PF(bp))
780baad4
VV
5834 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5835 vnic);
674f50a5 5836 else
780baad4
VV
5837 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5838 vnic);
674f50a5
MC
5839}
5840
b16b6891 5841int bnxt_nq_rings_in_use(struct bnxt *bp)
08654eb2
MC
5842{
5843 int cp = bp->cp_nr_rings;
5844 int ulp_msix, ulp_base;
5845
5846 ulp_msix = bnxt_get_ulp_msix_num(bp);
5847 if (ulp_msix) {
5848 ulp_base = bnxt_get_ulp_msix_base(bp);
5849 cp += ulp_msix;
5850 if ((ulp_base + ulp_msix) > cp)
5851 cp = ulp_base + ulp_msix;
5852 }
5853 return cp;
5854}
5855
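/* On P5 chips every TX and RX ring has its own completion ring, so
 * the completion rings in use are tx + rx; on older chips this is the
 * same as the number of NQs/MSI-X vectors in use.
 */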
c0b8cda0
MC
5856static int bnxt_cp_rings_in_use(struct bnxt *bp)
5857{
5858 int cp;
5859
5860 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5861 return bnxt_nq_rings_in_use(bp);
5862
5863 cp = bp->tx_nr_rings + bp->rx_nr_rings;
5864 return cp;
5865}
5866
780baad4
VV
5867static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5868{
d77b1ad8
MC
5869 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
5870 int cp = bp->cp_nr_rings;
5871
5872 if (!ulp_stat)
5873 return cp;
5874
5875 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
5876 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
5877
5878 return cp + ulp_stat;
780baad4
VV
5879}
5880
4e41dc5d
MC
5881static bool bnxt_need_reserve_rings(struct bnxt *bp)
5882{
5883 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
fbcfc8e4 5884 int cp = bnxt_cp_rings_in_use(bp);
c0b8cda0 5885 int nq = bnxt_nq_rings_in_use(bp);
780baad4 5886 int rx = bp->rx_nr_rings, stat;
4e41dc5d
MC
5887 int vnic = 1, grp = rx;
5888
5889 if (bp->hwrm_spec_code < 0x10601)
5890 return false;
5891
5892 if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5893 return true;
5894
41e8d798 5895 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
4e41dc5d
MC
5896 vnic = rx + 1;
5897 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5898 rx <<= 1;
780baad4 5899 stat = bnxt_get_func_stat_ctxs(bp);
f1ca94de 5900 if (BNXT_NEW_RM(bp) &&
4e41dc5d 5901 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
01989c6b 5902 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
41e8d798
MC
5903 (hw_resc->resv_hw_ring_grps != grp &&
5904 !(bp->flags & BNXT_FLAG_CHIP_P5))))
4e41dc5d 5905 return true;
01989c6b
MC
5906 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
5907 hw_resc->resv_irqs != nq)
5908 return true;
4e41dc5d
MC
5909 return false;
5910}
5911
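/* Reserve rings and related resources with firmware, then re-read
 * what was actually granted and trim the RX/TX/completion ring counts
 * to fit.  If the aggregation rings cannot be kept, aggregation and
 * LRO are turned off.
 */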
674f50a5
MC
5912static int __bnxt_reserve_rings(struct bnxt *bp)
5913{
5914 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
c0b8cda0 5915 int cp = bnxt_nq_rings_in_use(bp);
674f50a5
MC
5916 int tx = bp->tx_nr_rings;
5917 int rx = bp->rx_nr_rings;
674f50a5 5918 int grp, rx_rings, rc;
780baad4 5919 int vnic = 1, stat;
674f50a5 5920 bool sh = false;
674f50a5 5921
4e41dc5d 5922 if (!bnxt_need_reserve_rings(bp))
674f50a5
MC
5923 return 0;
5924
5925 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5926 sh = true;
41e8d798 5927 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
674f50a5
MC
5928 vnic = rx + 1;
5929 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5930 rx <<= 1;
674f50a5 5931 grp = bp->rx_nr_rings;
780baad4 5932 stat = bnxt_get_func_stat_ctxs(bp);
674f50a5 5933
780baad4 5934 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
391be5c2
MC
5935 if (rc)
5936 return rc;
5937
674f50a5 5938 tx = hw_resc->resv_tx_rings;
f1ca94de 5939 if (BNXT_NEW_RM(bp)) {
674f50a5 5940 rx = hw_resc->resv_rx_rings;
c0b8cda0 5941 cp = hw_resc->resv_irqs;
674f50a5
MC
5942 grp = hw_resc->resv_hw_ring_grps;
5943 vnic = hw_resc->resv_vnics;
780baad4 5944 stat = hw_resc->resv_stat_ctxs;
674f50a5
MC
5945 }
5946
5947 rx_rings = rx;
5948 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5949 if (rx >= 2) {
5950 rx_rings = rx >> 1;
5951 } else {
5952 if (netif_running(bp->dev))
5953 return -ENOMEM;
5954
5955 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
5956 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
5957 bp->dev->hw_features &= ~NETIF_F_LRO;
5958 bp->dev->features &= ~NETIF_F_LRO;
5959 bnxt_set_ring_params(bp);
5960 }
5961 }
5962 rx_rings = min_t(int, rx_rings, grp);
780baad4
VV
5963 cp = min_t(int, cp, bp->cp_nr_rings);
5964 if (stat > bnxt_get_ulp_stat_ctxs(bp))
5965 stat -= bnxt_get_ulp_stat_ctxs(bp);
5966 cp = min_t(int, cp, stat);
674f50a5
MC
5967 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
5968 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5969 rx = rx_rings << 1;
5970 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
5971 bp->tx_nr_rings = tx;
5972 bp->rx_nr_rings = rx_rings;
5973 bp->cp_nr_rings = cp;
5974
780baad4 5975 if (!tx || !rx || !cp || !grp || !vnic || !stat)
674f50a5
MC
5976 return -ENOMEM;
5977
391be5c2
MC
5978 return rc;
5979}
5980
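/* The check variants below send the same reservation request with the
 * *_ASSETS_TEST flags set so that firmware only verifies that the
 * requested resources are available instead of committing the
 * reservation.
 */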
8f23d638 5981static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
5982 int ring_grps, int cp_rings, int stats,
5983 int vnics)
98fdbe73 5984{
8f23d638 5985 struct hwrm_func_vf_cfg_input req = {0};
6fc2ffdf 5986 u32 flags;
98fdbe73
MC
5987 int rc;
5988
f1ca94de 5989 if (!BNXT_NEW_RM(bp))
98fdbe73
MC
5990 return 0;
5991
6fc2ffdf 5992 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5993 cp_rings, stats, vnics);
8f23d638
MC
5994 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
5995 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5996 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638 5997 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
41e8d798
MC
5998 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
5999 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6000 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6001 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8f23d638
MC
6002
6003 req.flags = cpu_to_le32(flags);
8f23d638 6004 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
d4f1420d 6005 return rc;
8f23d638
MC
6006}
6007
6008static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6009 int ring_grps, int cp_rings, int stats,
6010 int vnics)
8f23d638
MC
6011{
6012 struct hwrm_func_cfg_input req = {0};
6fc2ffdf 6013 u32 flags;
8f23d638 6014 int rc;
98fdbe73 6015
6fc2ffdf 6016 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 6017 cp_rings, stats, vnics);
8f23d638 6018 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
41e8d798 6019 if (BNXT_NEW_RM(bp)) {
8f23d638
MC
6020 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6021 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638
MC
6022 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6023 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
41e8d798 6024 if (bp->flags & BNXT_FLAG_CHIP_P5)
0b815023
MC
6025 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6026 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
41e8d798
MC
6027 else
6028 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6029 }
6fc2ffdf 6030
8f23d638 6031 req.flags = cpu_to_le32(flags);
98fdbe73 6032 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
d4f1420d 6033 return rc;
98fdbe73
MC
6034}
6035
8f23d638 6036static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
6037 int ring_grps, int cp_rings, int stats,
6038 int vnics)
8f23d638
MC
6039{
6040 if (bp->hwrm_spec_code < 0x10801)
6041 return 0;
6042
6043 if (BNXT_PF(bp))
6044 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
780baad4
VV
6045 ring_grps, cp_rings, stats,
6046 vnics);
8f23d638
MC
6047
6048 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
780baad4 6049 cp_rings, stats, vnics);
8f23d638
MC
6050}
6051
74706afa
MC
6052static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6053{
6054 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6055 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6056 struct hwrm_ring_aggint_qcaps_input req = {0};
6057 int rc;
6058
6059 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6060 coal_cap->num_cmpl_dma_aggr_max = 63;
6061 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6062 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6063 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6064 coal_cap->int_lat_tmr_min_max = 65535;
6065 coal_cap->int_lat_tmr_max_max = 65535;
6066 coal_cap->num_cmpl_aggr_int_max = 65535;
6067 coal_cap->timer_units = 80;
6068
6069 if (bp->hwrm_spec_code < 0x10902)
6070 return;
6071
6072 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6073 mutex_lock(&bp->hwrm_cmd_lock);
6074 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6075 if (!rc) {
6076 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
58590c8d 6077 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
74706afa
MC
6078 coal_cap->num_cmpl_dma_aggr_max =
6079 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6080 coal_cap->num_cmpl_dma_aggr_during_int_max =
6081 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6082 coal_cap->cmpl_aggr_dma_tmr_max =
6083 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6084 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6085 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6086 coal_cap->int_lat_tmr_min_max =
6087 le16_to_cpu(resp->int_lat_tmr_min_max);
6088 coal_cap->int_lat_tmr_max_max =
6089 le16_to_cpu(resp->int_lat_tmr_max_max);
6090 coal_cap->num_cmpl_aggr_int_max =
6091 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6092 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6093 }
6094 mutex_unlock(&bp->hwrm_cmd_lock);
6095}
6096
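/* Convert microseconds to device coalescing timer units.  timer_units
 * is in nanoseconds (80 by default above), so e.g. 25 usec works out
 * to 25 * 1000 / 80 = 312 timer ticks.
 */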
6097static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6098{
6099 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6100
6101 return usec * 1000 / coal_cap->timer_units;
6102}
6103
6104static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6105 struct bnxt_coal *hw_coal,
bb053f52
MC
6106 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6107{
74706afa
MC
6108 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6109 u32 cmpl_params = coal_cap->cmpl_params;
6110 u16 val, tmr, max, flags = 0;
f8503969
MC
6111
6112 max = hw_coal->bufs_per_record * 128;
6113 if (hw_coal->budget)
6114 max = hw_coal->bufs_per_record * hw_coal->budget;
74706afa 6115 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
f8503969
MC
6116
6117 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6118 req->num_cmpl_aggr_int = cpu_to_le16(val);
b153cbc5 6119
74706afa 6120 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
f8503969
MC
6121 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6122
74706afa
MC
6123 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6124 coal_cap->num_cmpl_dma_aggr_during_int_max);
f8503969
MC
6125 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6126
74706afa
MC
6127 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6128 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
f8503969
MC
6129 req->int_lat_tmr_max = cpu_to_le16(tmr);
6130
6131 /* min timer set to 1/2 of interrupt timer */
74706afa
MC
6132 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6133 val = tmr / 2;
6134 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6135 req->int_lat_tmr_min = cpu_to_le16(val);
6136 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6137 }
f8503969
MC
6138
6139 /* buf timer set to 1/4 of interrupt timer */
74706afa 6140 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
f8503969
MC
6141 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6142
74706afa
MC
6143 if (cmpl_params &
6144 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6145 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6146 val = clamp_t(u16, tmr, 1,
6147 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
 6148		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6149 req->enables |=
6150 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6151 }
f8503969 6152
74706afa
MC
6153 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6154 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6155 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6156 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
f8503969 6157 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
bb053f52 6158 req->flags = cpu_to_le16(flags);
74706afa 6159 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
bb053f52
MC
6160}
6161
58590c8d
MC
6162/* Caller holds bp->hwrm_cmd_lock */
6163static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6164 struct bnxt_coal *hw_coal)
6165{
6166 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6167 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6168 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6169 u32 nq_params = coal_cap->nq_params;
6170 u16 tmr;
6171
6172 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6173 return 0;
6174
6175 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6176 -1, -1);
6177 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6178 req.flags =
6179 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6180
6181 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6182 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6183 req.int_lat_tmr_min = cpu_to_le16(tmr);
6184 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6185 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6186}
6187
6a8788f2
AG
6188int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6189{
6190 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6191 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6192 struct bnxt_coal coal;
6a8788f2
AG
6193
 6194	/* Tick values in microseconds.
6195 * 1 coal_buf x bufs_per_record = 1 completion record.
6196 */
6197 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6198
6199 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6200 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6201
6202 if (!bnapi->rx_ring)
6203 return -ENODEV;
6204
6205 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6206 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6207
74706afa 6208 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6a8788f2 6209
2c61d211 6210 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6a8788f2
AG
6211
6212 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6213 HWRM_CMD_TIMEOUT);
6214}
6215
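/* Program interrupt coalescing for every ring.  Each NAPI instance is
 * given the RX or TX parameters depending on the ring it services; on
 * P5 chips a shared NAPI also programs its TX completion ring, and the
 * NQ min timer is set separately via __bnxt_hwrm_set_coal_nq().
 */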
c0c050c5
MC
6216int bnxt_hwrm_set_coal(struct bnxt *bp)
6217{
6218 int i, rc = 0;
dfc9c94a
MC
6219 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6220 req_tx = {0}, *req;
c0c050c5 6221
dfc9c94a
MC
6222 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6223 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6224 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6225 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
c0c050c5 6226
74706afa
MC
6227 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6228 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
c0c050c5
MC
6229
6230 mutex_lock(&bp->hwrm_cmd_lock);
6231 for (i = 0; i < bp->cp_nr_rings; i++) {
dfc9c94a 6232 struct bnxt_napi *bnapi = bp->bnapi[i];
58590c8d 6233 struct bnxt_coal *hw_coal;
2c61d211 6234 u16 ring_id;
c0c050c5 6235
dfc9c94a 6236 req = &req_rx;
2c61d211
MC
6237 if (!bnapi->rx_ring) {
6238 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
dfc9c94a 6239 req = &req_tx;
2c61d211
MC
6240 } else {
6241 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6242 }
6243 req->ring_id = cpu_to_le16(ring_id);
dfc9c94a
MC
6244
6245 rc = _hwrm_send_message(bp, req, sizeof(*req),
c0c050c5
MC
6246 HWRM_CMD_TIMEOUT);
6247 if (rc)
6248 break;
58590c8d
MC
6249
6250 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6251 continue;
6252
6253 if (bnapi->rx_ring && bnapi->tx_ring) {
6254 req = &req_tx;
6255 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6256 req->ring_id = cpu_to_le16(ring_id);
6257 rc = _hwrm_send_message(bp, req, sizeof(*req),
6258 HWRM_CMD_TIMEOUT);
6259 if (rc)
6260 break;
6261 }
6262 if (bnapi->rx_ring)
6263 hw_coal = &bp->rx_coal;
6264 else
6265 hw_coal = &bp->tx_coal;
6266 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
c0c050c5
MC
6267 }
6268 mutex_unlock(&bp->hwrm_cmd_lock);
6269 return rc;
6270}
6271
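/* Each completion ring owns a firmware statistics context.  The two
 * helpers below free and (re)allocate those contexts, pointing each
 * one at the ring's hw_stats DMA block and the global update period.
 */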
6272static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6273{
6274 int rc = 0, i;
6275 struct hwrm_stat_ctx_free_input req = {0};
6276
6277 if (!bp->bnapi)
6278 return 0;
6279
3e8060fa
PS
6280 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6281 return 0;
6282
c0c050c5
MC
6283 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6284
6285 mutex_lock(&bp->hwrm_cmd_lock);
6286 for (i = 0; i < bp->cp_nr_rings; i++) {
6287 struct bnxt_napi *bnapi = bp->bnapi[i];
6288 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6289
6290 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6291 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6292
6293 rc = _hwrm_send_message(bp, &req, sizeof(req),
6294 HWRM_CMD_TIMEOUT);
6295 if (rc)
6296 break;
6297
6298 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6299 }
6300 }
6301 mutex_unlock(&bp->hwrm_cmd_lock);
6302 return rc;
6303}
6304
6305static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6306{
6307 int rc = 0, i;
6308 struct hwrm_stat_ctx_alloc_input req = {0};
6309 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6310
3e8060fa
PS
6311 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6312 return 0;
6313
c0c050c5
MC
6314 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6315
4e748506 6316 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
51f30785 6317 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
c0c050c5
MC
6318
6319 mutex_lock(&bp->hwrm_cmd_lock);
6320 for (i = 0; i < bp->cp_nr_rings; i++) {
6321 struct bnxt_napi *bnapi = bp->bnapi[i];
6322 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6323
6324 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
6325
6326 rc = _hwrm_send_message(bp, &req, sizeof(req),
6327 HWRM_CMD_TIMEOUT);
6328 if (rc)
6329 break;
6330
6331 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6332
6333 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6334 }
6335 mutex_unlock(&bp->hwrm_cmd_lock);
89aa8445 6336 return rc;
c0c050c5
MC
6337}
6338
cf6645f8
MC
6339static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6340{
6341 struct hwrm_func_qcfg_input req = {0};
567b2abe 6342 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9315edca 6343 u16 flags;
cf6645f8
MC
6344 int rc;
6345
6346 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6347 req.fid = cpu_to_le16(0xffff);
6348 mutex_lock(&bp->hwrm_cmd_lock);
6349 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6350 if (rc)
6351 goto func_qcfg_exit;
6352
6353#ifdef CONFIG_BNXT_SRIOV
6354 if (BNXT_VF(bp)) {
cf6645f8
MC
6355 struct bnxt_vf_info *vf = &bp->vf;
6356
6357 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6358 }
6359#endif
9315edca
MC
6360 flags = le16_to_cpu(resp->flags);
6361 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6362 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
97381a18 6363 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
9315edca 6364 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
97381a18 6365 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
9315edca
MC
6366 }
6367 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6368 bp->flags |= BNXT_FLAG_MULTI_HOST;
bc39f885 6369
567b2abe
SB
6370 switch (resp->port_partition_type) {
6371 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6372 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6373 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6374 bp->port_partition_type = resp->port_partition_type;
6375 break;
6376 }
32e8239c
MC
6377 if (bp->hwrm_spec_code < 0x10707 ||
6378 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6379 bp->br_mode = BRIDGE_MODE_VEB;
6380 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6381 bp->br_mode = BRIDGE_MODE_VEPA;
6382 else
6383 bp->br_mode = BRIDGE_MODE_UNDEF;
cf6645f8 6384
7eb9bb3a
MC
6385 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6386 if (!bp->max_mtu)
6387 bp->max_mtu = BNXT_MAX_MTU;
6388
cf6645f8
MC
6389func_qcfg_exit:
6390 mutex_unlock(&bp->hwrm_cmd_lock);
6391 return rc;
6392}
6393
98f04cf0
MC
6394static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6395{
6396 struct hwrm_func_backing_store_qcaps_input req = {0};
6397 struct hwrm_func_backing_store_qcaps_output *resp =
6398 bp->hwrm_cmd_resp_addr;
6399 int rc;
6400
6401 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6402 return 0;
6403
6404 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6405 mutex_lock(&bp->hwrm_cmd_lock);
6406 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6407 if (!rc) {
6408 struct bnxt_ctx_pg_info *ctx_pg;
6409 struct bnxt_ctx_mem_info *ctx;
6410 int i;
6411
6412 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6413 if (!ctx) {
6414 rc = -ENOMEM;
6415 goto ctx_err;
6416 }
6417 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
6418 if (!ctx_pg) {
6419 kfree(ctx);
6420 rc = -ENOMEM;
6421 goto ctx_err;
6422 }
6423 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6424 ctx->tqm_mem[i] = ctx_pg;
6425
6426 bp->ctx = ctx;
6427 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6428 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6429 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6430 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6431 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6432 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6433 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6434 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6435 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6436 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6437 ctx->vnic_max_vnic_entries =
6438 le16_to_cpu(resp->vnic_max_vnic_entries);
6439 ctx->vnic_max_ring_table_entries =
6440 le16_to_cpu(resp->vnic_max_ring_table_entries);
6441 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6442 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6443 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6444 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6445 ctx->tqm_min_entries_per_ring =
6446 le32_to_cpu(resp->tqm_min_entries_per_ring);
6447 ctx->tqm_max_entries_per_ring =
6448 le32_to_cpu(resp->tqm_max_entries_per_ring);
6449 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6450 if (!ctx->tqm_entries_multiple)
6451 ctx->tqm_entries_multiple = 1;
6452 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6453 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
53579e37
DS
6454 ctx->mrav_num_entries_units =
6455 le16_to_cpu(resp->mrav_num_entries_units);
98f04cf0
MC
6456 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6457 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6458 } else {
6459 rc = 0;
6460 }
6461ctx_err:
6462 mutex_unlock(&bp->hwrm_cmd_lock);
6463 return rc;
6464}
6465
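/* Encode the page size and indirection depth of a backing-store area
 * into the pg_size/lvl attribute byte and fill in the page directory
 * (or single page) DMA address for the FUNC_BACKING_STORE_CFG request.
 */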
1b9394e5
MC
6466static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6467 __le64 *pg_dir)
6468{
6469 u8 pg_size = 0;
6470
6471 if (BNXT_PAGE_SHIFT == 13)
6472 pg_size = 1 << 4;
6473 else if (BNXT_PAGE_SIZE == 16)
6474 pg_size = 2 << 4;
6475
6476 *pg_attr = pg_size;
08fe9d18
MC
6477 if (rmem->depth >= 1) {
6478 if (rmem->depth == 2)
6479 *pg_attr |= 2;
6480 else
6481 *pg_attr |= 1;
1b9394e5
MC
6482 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6483 } else {
6484 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6485 }
6486}
6487
6488#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6489 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6490 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6491 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6492 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6493 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6494
6495static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6496{
6497 struct hwrm_func_backing_store_cfg_input req = {0};
6498 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6499 struct bnxt_ctx_pg_info *ctx_pg;
6500 __le32 *num_entries;
6501 __le64 *pg_dir;
53579e37 6502 u32 flags = 0;
1b9394e5
MC
6503 u8 *pg_attr;
6504 int i, rc;
6505 u32 ena;
6506
6507 if (!ctx)
6508 return 0;
6509
6510 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6511 req.enables = cpu_to_le32(enables);
6512
6513 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6514 ctx_pg = &ctx->qp_mem;
6515 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6516 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6517 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6518 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6519 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6520 &req.qpc_pg_size_qpc_lvl,
6521 &req.qpc_page_dir);
6522 }
6523 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6524 ctx_pg = &ctx->srq_mem;
6525 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6526 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6527 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6528 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6529 &req.srq_pg_size_srq_lvl,
6530 &req.srq_page_dir);
6531 }
6532 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6533 ctx_pg = &ctx->cq_mem;
6534 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6535 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6536 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6537 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6538 &req.cq_page_dir);
6539 }
6540 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6541 ctx_pg = &ctx->vnic_mem;
6542 req.vnic_num_vnic_entries =
6543 cpu_to_le16(ctx->vnic_max_vnic_entries);
6544 req.vnic_num_ring_table_entries =
6545 cpu_to_le16(ctx->vnic_max_ring_table_entries);
6546 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6547 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6548 &req.vnic_pg_size_vnic_lvl,
6549 &req.vnic_page_dir);
6550 }
6551 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6552 ctx_pg = &ctx->stat_mem;
6553 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6554 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6555 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6556 &req.stat_pg_size_stat_lvl,
6557 &req.stat_page_dir);
6558 }
cf6daed0
MC
6559 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6560 ctx_pg = &ctx->mrav_mem;
6561 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
53579e37
DS
6562 if (ctx->mrav_num_entries_units)
6563 flags |=
6564 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
cf6daed0
MC
6565 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6566 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6567 &req.mrav_pg_size_mrav_lvl,
6568 &req.mrav_page_dir);
6569 }
6570 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6571 ctx_pg = &ctx->tim_mem;
6572 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6573 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6574 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6575 &req.tim_pg_size_tim_lvl,
6576 &req.tim_page_dir);
6577 }
1b9394e5
MC
6578 for (i = 0, num_entries = &req.tqm_sp_num_entries,
6579 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6580 pg_dir = &req.tqm_sp_page_dir,
6581 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6582 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6583 if (!(enables & ena))
6584 continue;
6585
6586 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6587 ctx_pg = ctx->tqm_mem[i];
6588 *num_entries = cpu_to_le32(ctx_pg->entries);
6589 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6590 }
53579e37 6591 req.flags = cpu_to_le32(flags);
1b9394e5 6592 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1b9394e5
MC
6593 return rc;
6594}
6595
98f04cf0 6596static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
08fe9d18 6597 struct bnxt_ctx_pg_info *ctx_pg)
98f04cf0
MC
6598{
6599 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6600
98f04cf0
MC
6601 rmem->page_size = BNXT_PAGE_SIZE;
6602 rmem->pg_arr = ctx_pg->ctx_pg_arr;
6603 rmem->dma_arr = ctx_pg->ctx_dma_arr;
1b9394e5 6604 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
08fe9d18
MC
6605 if (rmem->depth >= 1)
6606 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
98f04cf0
MC
6607 return bnxt_alloc_ring(bp, rmem);
6608}
6609
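/* Allocate the pages backing one context area.  Small areas use a
 * single level of pages; anything larger than MAX_CTX_PAGES (or a
 * requested depth > 1) gets a two-level page table, with the last
 * table trimmed to the remaining page count.
 */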
08fe9d18
MC
6610static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6611 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6612 u8 depth)
6613{
6614 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6615 int rc;
6616
6617 if (!mem_size)
6618 return 0;
6619
6620 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6621 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6622 ctx_pg->nr_pages = 0;
6623 return -EINVAL;
6624 }
6625 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6626 int nr_tbls, i;
6627
6628 rmem->depth = 2;
6629 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6630 GFP_KERNEL);
6631 if (!ctx_pg->ctx_pg_tbl)
6632 return -ENOMEM;
6633 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6634 rmem->nr_pages = nr_tbls;
6635 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6636 if (rc)
6637 return rc;
6638 for (i = 0; i < nr_tbls; i++) {
6639 struct bnxt_ctx_pg_info *pg_tbl;
6640
6641 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6642 if (!pg_tbl)
6643 return -ENOMEM;
6644 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6645 rmem = &pg_tbl->ring_mem;
6646 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6647 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6648 rmem->depth = 1;
6649 rmem->nr_pages = MAX_CTX_PAGES;
6ef982de
MC
6650 if (i == (nr_tbls - 1)) {
6651 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6652
6653 if (rem)
6654 rmem->nr_pages = rem;
6655 }
08fe9d18
MC
6656 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6657 if (rc)
6658 break;
6659 }
6660 } else {
6661 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6662 if (rmem->nr_pages > 1 || depth)
6663 rmem->depth = 1;
6664 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6665 }
6666 return rc;
6667}
6668
6669static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6670 struct bnxt_ctx_pg_info *ctx_pg)
6671{
6672 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6673
6674 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6675 ctx_pg->ctx_pg_tbl) {
6676 int i, nr_tbls = rmem->nr_pages;
6677
6678 for (i = 0; i < nr_tbls; i++) {
6679 struct bnxt_ctx_pg_info *pg_tbl;
6680 struct bnxt_ring_mem_info *rmem2;
6681
6682 pg_tbl = ctx_pg->ctx_pg_tbl[i];
6683 if (!pg_tbl)
6684 continue;
6685 rmem2 = &pg_tbl->ring_mem;
6686 bnxt_free_ring(bp, rmem2);
6687 ctx_pg->ctx_pg_arr[i] = NULL;
6688 kfree(pg_tbl);
6689 ctx_pg->ctx_pg_tbl[i] = NULL;
6690 }
6691 kfree(ctx_pg->ctx_pg_tbl);
6692 ctx_pg->ctx_pg_tbl = NULL;
6693 }
6694 bnxt_free_ring(bp, rmem);
6695 ctx_pg->nr_pages = 0;
6696}
6697
98f04cf0
MC
6698static void bnxt_free_ctx_mem(struct bnxt *bp)
6699{
6700 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6701 int i;
6702
6703 if (!ctx)
6704 return;
6705
6706 if (ctx->tqm_mem[0]) {
6707 for (i = 0; i < bp->max_q + 1; i++)
08fe9d18 6708 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
98f04cf0
MC
6709 kfree(ctx->tqm_mem[0]);
6710 ctx->tqm_mem[0] = NULL;
6711 }
6712
cf6daed0
MC
6713 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6714 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
08fe9d18
MC
6715 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6716 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6717 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6718 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6719 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
98f04cf0
MC
6720 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6721}
6722
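/* Size and allocate every firmware backing-store area (QP, SRQ, CQ,
 * VNIC, STAT and the per-queue TQM rings), then hand the layout to
 * firmware via HWRM_FUNC_BACKING_STORE_CFG.  RoCE-capable devices get
 * extra QP/SRQ entries plus MR/AV and TIM context.
 */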
6723static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6724{
6725 struct bnxt_ctx_pg_info *ctx_pg;
6726 struct bnxt_ctx_mem_info *ctx;
1b9394e5 6727 u32 mem_size, ena, entries;
53579e37 6728 u32 num_mr, num_ah;
cf6daed0
MC
6729 u32 extra_srqs = 0;
6730 u32 extra_qps = 0;
6731 u8 pg_lvl = 1;
98f04cf0
MC
6732 int i, rc;
6733
6734 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6735 if (rc) {
6736 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6737 rc);
6738 return rc;
6739 }
6740 ctx = bp->ctx;
6741 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6742 return 0;
6743
d629522e 6744 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
cf6daed0
MC
6745 pg_lvl = 2;
6746 extra_qps = 65536;
6747 extra_srqs = 8192;
6748 }
6749
98f04cf0 6750 ctx_pg = &ctx->qp_mem;
cf6daed0
MC
6751 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6752 extra_qps;
98f04cf0 6753 mem_size = ctx->qp_entry_size * ctx_pg->entries;
cf6daed0 6754 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
98f04cf0
MC
6755 if (rc)
6756 return rc;
6757
6758 ctx_pg = &ctx->srq_mem;
cf6daed0 6759 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
98f04cf0 6760 mem_size = ctx->srq_entry_size * ctx_pg->entries;
cf6daed0 6761 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
98f04cf0
MC
6762 if (rc)
6763 return rc;
6764
6765 ctx_pg = &ctx->cq_mem;
cf6daed0 6766 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
98f04cf0 6767 mem_size = ctx->cq_entry_size * ctx_pg->entries;
cf6daed0 6768 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
98f04cf0
MC
6769 if (rc)
6770 return rc;
6771
6772 ctx_pg = &ctx->vnic_mem;
6773 ctx_pg->entries = ctx->vnic_max_vnic_entries +
6774 ctx->vnic_max_ring_table_entries;
6775 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
08fe9d18 6776 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
98f04cf0
MC
6777 if (rc)
6778 return rc;
6779
6780 ctx_pg = &ctx->stat_mem;
6781 ctx_pg->entries = ctx->stat_max_entries;
6782 mem_size = ctx->stat_entry_size * ctx_pg->entries;
08fe9d18 6783 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
98f04cf0
MC
6784 if (rc)
6785 return rc;
6786
cf6daed0
MC
6787 ena = 0;
6788 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6789 goto skip_rdma;
6790
6791 ctx_pg = &ctx->mrav_mem;
53579e37
DS
6792 /* 128K extra is needed to accommodate static AH context
6793 * allocation by f/w.
6794 */
6795 num_mr = 1024 * 256;
6796 num_ah = 1024 * 128;
6797 ctx_pg->entries = num_mr + num_ah;
cf6daed0
MC
6798 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6799 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
6800 if (rc)
6801 return rc;
6802 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
53579e37
DS
6803 if (ctx->mrav_num_entries_units)
6804 ctx_pg->entries =
6805 ((num_mr / ctx->mrav_num_entries_units) << 16) |
6806 (num_ah / ctx->mrav_num_entries_units);
cf6daed0
MC
6807
6808 ctx_pg = &ctx->tim_mem;
6809 ctx_pg->entries = ctx->qp_mem.entries;
6810 mem_size = ctx->tim_entry_size * ctx_pg->entries;
6811 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6812 if (rc)
6813 return rc;
6814 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6815
6816skip_rdma:
6817 entries = ctx->qp_max_l2_entries + extra_qps;
98f04cf0
MC
6818 entries = roundup(entries, ctx->tqm_entries_multiple);
6819 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6820 ctx->tqm_max_entries_per_ring);
cf6daed0 6821 for (i = 0; i < bp->max_q + 1; i++) {
98f04cf0
MC
6822 ctx_pg = ctx->tqm_mem[i];
6823 ctx_pg->entries = entries;
6824 mem_size = ctx->tqm_entry_size * entries;
08fe9d18 6825 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
98f04cf0
MC
6826 if (rc)
6827 return rc;
1b9394e5 6828 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
98f04cf0 6829 }
1b9394e5
MC
6830 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6831 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6832 if (rc)
6833 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6834 rc);
6835 else
6836 ctx->flags |= BNXT_CTX_FLAG_INITED;
6837
98f04cf0
MC
6838 return 0;
6839}
6840
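/* Query the min/max resource limits for this function
 * (HWRM_FUNC_RESOURCE_QCAPS).  When @all is false only
 * max_tx_sch_inputs is refreshed.
 */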
db4723b3 6841int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
be0dd9c4
MC
6842{
6843 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6844 struct hwrm_func_resource_qcaps_input req = {0};
6845 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6846 int rc;
6847
6848 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6849 req.fid = cpu_to_le16(0xffff);
6850
6851 mutex_lock(&bp->hwrm_cmd_lock);
351cbde9
JT
6852 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6853 HWRM_CMD_TIMEOUT);
d4f1420d 6854 if (rc)
be0dd9c4 6855 goto hwrm_func_resc_qcaps_exit;
be0dd9c4 6856
db4723b3
MC
6857 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6858 if (!all)
6859 goto hwrm_func_resc_qcaps_exit;
6860
be0dd9c4
MC
6861 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6862 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6863 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6864 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6865 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6866 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6867 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6868 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6869 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6870 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6871 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6872 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6873 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6874 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6875 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6876 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6877
9c1fabdf
MC
6878 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6879 u16 max_msix = le16_to_cpu(resp->max_msix);
6880
f7588cd8 6881 hw_resc->max_nqs = max_msix;
9c1fabdf
MC
6882 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6883 }
6884
4673d664
MC
6885 if (BNXT_PF(bp)) {
6886 struct bnxt_pf_info *pf = &bp->pf;
6887
6888 pf->vf_resv_strategy =
6889 le16_to_cpu(resp->vf_reservation_strategy);
bf82736d 6890 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
4673d664
MC
6891 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6892 }
be0dd9c4
MC
6893hwrm_func_resc_qcaps_exit:
6894 mutex_unlock(&bp->hwrm_cmd_lock);
6895 return rc;
6896}
6897
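/* Descriptive summary (added): issue HWRM_FUNC_QCAPS and cache the basic
 * capabilities - RoCE v1/v2, PCIe/extended stats, error recovery and TX push
 * support, the absolute maximum ring/context/VNIC counts, and the PF or VF
 * identity (fid, port id, MAC address, VF range, flow limits, WoL capability).
 */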
6898static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5
MC
6899{
6900 int rc = 0;
6901 struct hwrm_func_qcaps_input req = {0};
6902 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6a4f2947
MC
6903 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6904 u32 flags;
c0c050c5
MC
6905
6906 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6907 req.fid = cpu_to_le16(0xffff);
6908
6909 mutex_lock(&bp->hwrm_cmd_lock);
6910 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6911 if (rc)
6912 goto hwrm_func_qcaps_exit;
6913
6a4f2947
MC
6914 flags = le32_to_cpu(resp->flags);
6915 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
e4060d30 6916 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6a4f2947 6917 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
e4060d30 6918 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
55e4398d
VV
6919 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
6920 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
6154532f
VV
6921 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
6922 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
07f83d72
MC
6923 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
6924 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
e4060d30 6925
7cc5a20e 6926 bp->tx_push_thresh = 0;
6a4f2947 6927 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
7cc5a20e
MC
6928 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6929
6a4f2947
MC
6930 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6931 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6932 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6933 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6934 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6935 if (!hw_resc->max_hw_ring_grps)
6936 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6937 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6938 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6939 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6940
c0c050c5
MC
6941 if (BNXT_PF(bp)) {
6942 struct bnxt_pf_info *pf = &bp->pf;
6943
6944 pf->fw_fid = le16_to_cpu(resp->fid);
6945 pf->port_id = le16_to_cpu(resp->port_id);
87027db1 6946 bp->dev->dev_port = pf->port_id;
11f15ed3 6947 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
c0c050c5
MC
6948 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
6949 pf->max_vfs = le16_to_cpu(resp->max_vfs);
6950 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
6951 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
6952 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
6953 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
6954 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
6955 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
ba642ab7 6956 bp->flags &= ~BNXT_FLAG_WOL_CAP;
6a4f2947 6957 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
c1ef146a 6958 bp->flags |= BNXT_FLAG_WOL_CAP;
c0c050c5 6959 } else {
379a80a1 6960#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
6961 struct bnxt_vf_info *vf = &bp->vf;
6962
6963 vf->fw_fid = le16_to_cpu(resp->fid);
7cc5a20e 6964 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
379a80a1 6965#endif
c0c050c5
MC
6966 }
6967
c0c050c5
MC
6968hwrm_func_qcaps_exit:
6969 mutex_unlock(&bp->hwrm_cmd_lock);
6970 return rc;
6971}
6972
804fba4e
MC
6973static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
6974
be0dd9c4
MC
6975static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
6976{
6977 int rc;
6978
6979 rc = __bnxt_hwrm_func_qcaps(bp);
6980 if (rc)
6981 return rc;
804fba4e
MC
6982 rc = bnxt_hwrm_queue_qportcfg(bp);
6983 if (rc) {
6984 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
6985 return rc;
6986 }
be0dd9c4 6987 if (bp->hwrm_spec_code >= 0x10803) {
98f04cf0
MC
6988 rc = bnxt_alloc_ctx_mem(bp);
6989 if (rc)
6990 return rc;
db4723b3 6991 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
be0dd9c4 6992 if (!rc)
97381a18 6993 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
be0dd9c4
MC
6994 }
6995 return 0;
6996}
6997
e969ae5b
MC
6998static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
6999{
7000 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7001 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7002 int rc = 0;
7003 u32 flags;
7004
7005 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7006 return 0;
7007
7008 resp = bp->hwrm_cmd_resp_addr;
7009 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7010
7011 mutex_lock(&bp->hwrm_cmd_lock);
7012 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7013 if (rc)
7014 goto hwrm_cfa_adv_qcaps_exit;
7015
7016 flags = le32_to_cpu(resp->flags);
7017 if (flags &
7018 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED)
7019 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX;
7020
7021hwrm_cfa_adv_qcaps_exit:
7022 mutex_unlock(&bp->hwrm_cmd_lock);
7023 return rc;
7024}
7025
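/* Descriptive summary (added): pre-map the GRC-type firmware health registers
 * through a fixed BAR0 window.  All GRC registers must share the same base,
 * otherwise -ERANGE is returned; non-GRC register types are left untouched.
 */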
9ffbd677
MC
7026static int bnxt_map_fw_health_regs(struct bnxt *bp)
7027{
7028 struct bnxt_fw_health *fw_health = bp->fw_health;
7029 u32 reg_base = 0xffffffff;
7030 int i;
7031
7032 /* Only pre-map the monitoring GRC registers using window 3 */
7033 for (i = 0; i < 4; i++) {
7034 u32 reg = fw_health->regs[i];
7035
7036 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7037 continue;
7038 if (reg_base == 0xffffffff)
7039 reg_base = reg & BNXT_GRC_BASE_MASK;
7040 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7041 return -ERANGE;
7042 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
7043 (reg & BNXT_GRC_OFFSET_MASK);
7044 }
7045 if (reg_base == 0xffffffff)
7046 return 0;
7047
7048 writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7049 BNXT_FW_HEALTH_WIN_MAP_OFF);
7050 return 0;
7051}
7052
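/* Descriptive summary (added): read the firmware error recovery configuration
 * with HWRM_ERROR_RECOVERY_QCFG - polling and wait intervals, the health,
 * heartbeat, reset-count and reset-in-progress register locations, and the
 * register write sequence used to reset the chip.  Allocates bp->fw_health on
 * first use and clears BNXT_FW_CAP_ERROR_RECOVERY if anything fails.
 */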
07f83d72
MC
7053static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7054{
7055 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7056 struct bnxt_fw_health *fw_health = bp->fw_health;
7057 struct hwrm_error_recovery_qcfg_input req = {0};
7058 int rc, i;
7059
7060 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7061 return 0;
7062
7063 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7064 mutex_lock(&bp->hwrm_cmd_lock);
7065 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7066 if (rc)
7067 goto err_recovery_out;
7068 if (!fw_health) {
7069 fw_health = kzalloc(sizeof(*fw_health), GFP_KERNEL);
7070 bp->fw_health = fw_health;
7071 if (!fw_health) {
7072 rc = -ENOMEM;
7073 goto err_recovery_out;
7074 }
7075 }
7076 fw_health->flags = le32_to_cpu(resp->flags);
7077 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7078 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7079 rc = -EINVAL;
7080 goto err_recovery_out;
7081 }
7082 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7083 fw_health->master_func_wait_dsecs =
7084 le32_to_cpu(resp->master_func_wait_period);
7085 fw_health->normal_func_wait_dsecs =
7086 le32_to_cpu(resp->normal_func_wait_period);
7087 fw_health->post_reset_wait_dsecs =
7088 le32_to_cpu(resp->master_func_wait_period_after_reset);
7089 fw_health->post_reset_max_wait_dsecs =
7090 le32_to_cpu(resp->max_bailout_time_after_reset);
7091 fw_health->regs[BNXT_FW_HEALTH_REG] =
7092 le32_to_cpu(resp->fw_health_status_reg);
7093 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7094 le32_to_cpu(resp->fw_heartbeat_reg);
7095 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7096 le32_to_cpu(resp->fw_reset_cnt_reg);
7097 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7098 le32_to_cpu(resp->reset_inprogress_reg);
7099 fw_health->fw_reset_inprog_reg_mask =
7100 le32_to_cpu(resp->reset_inprogress_reg_mask);
7101 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7102 if (fw_health->fw_reset_seq_cnt >= 16) {
7103 rc = -EINVAL;
7104 goto err_recovery_out;
7105 }
7106 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7107 fw_health->fw_reset_seq_regs[i] =
7108 le32_to_cpu(resp->reset_reg[i]);
7109 fw_health->fw_reset_seq_vals[i] =
7110 le32_to_cpu(resp->reset_reg_val[i]);
7111 fw_health->fw_reset_seq_delay_msec[i] =
7112 resp->delay_after_reset[i];
7113 }
7114err_recovery_out:
7115 mutex_unlock(&bp->hwrm_cmd_lock);
9ffbd677
MC
7116 if (!rc)
7117 rc = bnxt_map_fw_health_regs(bp);
07f83d72
MC
7118 if (rc)
7119 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7120 return rc;
7121}
7122
c0c050c5
MC
7123static int bnxt_hwrm_func_reset(struct bnxt *bp)
7124{
7125 struct hwrm_func_reset_input req = {0};
7126
7127 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7128 req.enables = 0;
7129
7130 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7131}
7132
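/* Descriptive summary (added): query the port's hardware queue configuration
 * (HWRM_QUEUE_QPORTCFG) and build the driver's queue id/profile tables and
 * default TC mapping.  max_tc is capped at BNXT_MAX_QUEUE and forced to 1 for
 * asymmetric configurations; max_lltc never exceeds max_tc.
 */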
7133static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7134{
7135 int rc = 0;
7136 struct hwrm_queue_qportcfg_input req = {0};
7137 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
aabfc016
MC
7138 u8 i, j, *qptr;
7139 bool no_rdma;
c0c050c5
MC
7140
7141 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7142
7143 mutex_lock(&bp->hwrm_cmd_lock);
7144 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7145 if (rc)
7146 goto qportcfg_exit;
7147
7148 if (!resp->max_configurable_queues) {
7149 rc = -EINVAL;
7150 goto qportcfg_exit;
7151 }
7152 bp->max_tc = resp->max_configurable_queues;
87c374de 7153 bp->max_lltc = resp->max_configurable_lossless_queues;
c0c050c5
MC
7154 if (bp->max_tc > BNXT_MAX_QUEUE)
7155 bp->max_tc = BNXT_MAX_QUEUE;
7156
aabfc016
MC
7157 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7158 qptr = &resp->queue_id0;
7159 for (i = 0, j = 0; i < bp->max_tc; i++) {
98f04cf0
MC
7160 bp->q_info[j].queue_id = *qptr;
7161 bp->q_ids[i] = *qptr++;
aabfc016
MC
7162 bp->q_info[j].queue_profile = *qptr++;
7163 bp->tc_to_qidx[j] = j;
7164 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7165 (no_rdma && BNXT_PF(bp)))
7166 j++;
7167 }
98f04cf0 7168 bp->max_q = bp->max_tc;
aabfc016
MC
7169 bp->max_tc = max_t(u8, j, 1);
7170
441cabbb
MC
7171 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7172 bp->max_tc = 1;
7173
87c374de
MC
7174 if (bp->max_lltc > bp->max_tc)
7175 bp->max_lltc = bp->max_tc;
7176
c0c050c5
MC
7177qportcfg_exit:
7178 mutex_unlock(&bp->hwrm_cmd_lock);
7179 return rc;
7180}
7181
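/* Descriptive summary (added): version handshake with firmware.
 * __bnxt_hwrm_ver_get() sends HWRM_VER_GET with the driver's interface
 * version; bnxt_hwrm_ver_get() then records the firmware version string,
 * HWRM spec code, default command timeout, maximum request lengths, chip
 * number and the capability bits advertised in dev_caps_cfg (short commands,
 * Kong channel, 64-bit flow handles, trusted VF, advanced flow management).
 */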
ba642ab7 7182static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
c0c050c5 7183{
c0c050c5 7184 struct hwrm_ver_get_input req = {0};
ba642ab7 7185 int rc;
c0c050c5
MC
7186
7187 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7188 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7189 req.hwrm_intf_min = HWRM_VERSION_MINOR;
7190 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
ba642ab7
MC
7191
7192 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7193 silent);
7194 return rc;
7195}
7196
7197static int bnxt_hwrm_ver_get(struct bnxt *bp)
7198{
7199 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7200 u32 dev_caps_cfg;
7201 int rc;
7202
7203 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
c0c050c5 7204 mutex_lock(&bp->hwrm_cmd_lock);
ba642ab7 7205 rc = __bnxt_hwrm_ver_get(bp, false);
c0c050c5
MC
7206 if (rc)
7207 goto hwrm_ver_get_exit;
7208
7209 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7210
894aa69a
MC
7211 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7212 resp->hwrm_intf_min_8b << 8 |
7213 resp->hwrm_intf_upd_8b;
7214 if (resp->hwrm_intf_maj_8b < 1) {
c193554e 7215 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
894aa69a
MC
7216 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7217 resp->hwrm_intf_upd_8b);
c193554e 7218 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5 7219 }
431aa1eb 7220 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
894aa69a
MC
7221 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
7222 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
c0c050c5 7223
691aa620
VV
7224 if (strlen(resp->active_pkg_name)) {
7225 int fw_ver_len = strlen(bp->fw_ver_str);
7226
7227 snprintf(bp->fw_ver_str + fw_ver_len,
7228 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7229 resp->active_pkg_name);
7230 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7231 }
7232
ff4fe81d
MC
7233 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7234 if (!bp->hwrm_cmd_timeout)
7235 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7236
1dfddc41 7237 if (resp->hwrm_intf_maj_8b >= 1) {
e6ef2699 7238 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
1dfddc41
MC
7239 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7240 }
7241 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7242 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
e6ef2699 7243
659c805c 7244 bp->chip_num = le16_to_cpu(resp->chip_num);
3e8060fa
PS
7245 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7246 !resp->chip_metal)
7247 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
659c805c 7248
e605db80
DK
7249 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7250 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7251 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
97381a18 7252 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
e605db80 7253
760b6d33
VD
7254 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7255 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7256
abd43a13
VD
7257 if (dev_caps_cfg &
7258 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7259 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7260
2a516444
MC
7261 if (dev_caps_cfg &
7262 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7263 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7264
e969ae5b
MC
7265 if (dev_caps_cfg &
7266 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7267 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7268
c0c050c5
MC
7269hwrm_ver_get_exit:
7270 mutex_unlock(&bp->hwrm_cmd_lock);
7271 return rc;
7272}
7273
5ac67d8b
RS
7274int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7275{
7276 struct hwrm_fw_set_time_input req = {0};
7dfaa7bc
AB
7277 struct tm tm;
7278 time64_t now = ktime_get_real_seconds();
5ac67d8b 7279
ca2c39e2
MC
7280 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7281 bp->hwrm_spec_code < 0x10400)
5ac67d8b
RS
7282 return -EOPNOTSUPP;
7283
7dfaa7bc 7284 time64_to_tm(now, 0, &tm);
5ac67d8b
RS
7285 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7286 req.year = cpu_to_le16(1900 + tm.tm_year);
7287 req.month = 1 + tm.tm_mon;
7288 req.day = tm.tm_mday;
7289 req.hour = tm.tm_hour;
7290 req.minute = tm.tm_min;
7291 req.second = tm.tm_sec;
7292 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7293}
7294
3bdf56c4
MC
7295static int bnxt_hwrm_port_qstats(struct bnxt *bp)
7296{
7297 int rc;
7298 struct bnxt_pf_info *pf = &bp->pf;
7299 struct hwrm_port_qstats_input req = {0};
7300
7301 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7302 return 0;
7303
7304 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7305 req.port_id = cpu_to_le16(pf->port_id);
7306 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
7307 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
7308 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7309 return rc;
7310}
7311
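/* Descriptive summary (added): DMA the extended RX/TX port statistics into
 * the driver's buffers with HWRM_PORT_QSTATS_EXT and record how much of each
 * block the firmware filled in.  If the TX block includes the PFC duration
 * counters, also query HWRM_QUEUE_PRI2COS_QCFG to refresh the
 * priority-to-CoS map.
 */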
00db3cba
VV
7312static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
7313{
36e53349 7314 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
e37fed79 7315 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
00db3cba
VV
7316 struct hwrm_port_qstats_ext_input req = {0};
7317 struct bnxt_pf_info *pf = &bp->pf;
ad361adf 7318 u32 tx_stat_size;
36e53349 7319 int rc;
00db3cba
VV
7320
7321 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7322 return 0;
7323
7324 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7325 req.port_id = cpu_to_le16(pf->port_id);
7326 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7327 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
ad361adf
MC
7328 tx_stat_size = bp->hw_tx_port_stats_ext ?
7329 sizeof(*bp->hw_tx_port_stats_ext) : 0;
7330 req.tx_stat_size = cpu_to_le16(tx_stat_size);
36e53349
MC
7331 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
7332 mutex_lock(&bp->hwrm_cmd_lock);
7333 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7334 if (!rc) {
7335 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
ad361adf
MC
7336 bp->fw_tx_stats_ext_size = tx_stat_size ?
7337 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
36e53349
MC
7338 } else {
7339 bp->fw_rx_stats_ext_size = 0;
7340 bp->fw_tx_stats_ext_size = 0;
7341 }
e37fed79
MC
7342 if (bp->fw_tx_stats_ext_size <=
7343 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7344 mutex_unlock(&bp->hwrm_cmd_lock);
7345 bp->pri2cos_valid = 0;
7346 return rc;
7347 }
7348
7349 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
7350 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
7351
7352 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
7353 if (!rc) {
7354 struct hwrm_queue_pri2cos_qcfg_output *resp2;
7355 u8 *pri2cos;
7356 int i, j;
7357
7358 resp2 = bp->hwrm_cmd_resp_addr;
7359 pri2cos = &resp2->pri0_cos_queue_id;
7360 for (i = 0; i < 8; i++) {
7361 u8 queue_id = pri2cos[i];
7362
7363 for (j = 0; j < bp->max_q; j++) {
7364 if (bp->q_ids[j] == queue_id)
7365 bp->pri2cos[i] = j;
7366 }
7367 }
7368 bp->pri2cos_valid = 1;
7369 }
36e53349
MC
7370 mutex_unlock(&bp->hwrm_cmd_lock);
7371 return rc;
00db3cba
VV
7372}
7373
55e4398d
VV
7374static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
7375{
7376 struct hwrm_pcie_qstats_input req = {0};
7377
7378 if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
7379 return 0;
7380
7381 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
7382 req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
7383 req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
7384 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7385}
7386
c0c050c5
MC
7387static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
7388{
7389 if (bp->vxlan_port_cnt) {
7390 bnxt_hwrm_tunnel_dst_port_free(
7391 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7392 }
7393 bp->vxlan_port_cnt = 0;
7394 if (bp->nge_port_cnt) {
7395 bnxt_hwrm_tunnel_dst_port_free(
7396 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7397 }
7398 bp->nge_port_cnt = 0;
7399}
7400
7401static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
7402{
7403 int rc, i;
7404 u32 tpa_flags = 0;
7405
7406 if (set_tpa)
7407 tpa_flags = bp->flags & BNXT_FLAG_TPA;
7408 for (i = 0; i < bp->nr_vnics; i++) {
7409 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
7410 if (rc) {
7411 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
23e12c89 7412 i, rc);
c0c050c5
MC
7413 return rc;
7414 }
7415 }
7416 return 0;
7417}
7418
7419static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7420{
7421 int i;
7422
7423 for (i = 0; i < bp->nr_vnics; i++)
7424 bnxt_hwrm_vnic_set_rss(bp, i, false);
7425}
7426
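/* Descriptive summary (added): tear down all VNIC state - clear L2 filters
 * and RSS, free the RSS contexts (before the VNICs on older chips, after
 * them on P5), undo any TPA settings and finally free the VNICs themselves.
 */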
a46ecb11 7427static void bnxt_clear_vnic(struct bnxt *bp)
c0c050c5 7428{
a46ecb11
MC
7429 if (!bp->vnic_info)
7430 return;
7431
7432 bnxt_hwrm_clear_vnic_filter(bp);
7433 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
c0c050c5
MC
 7434 /* clear all RSS settings before freeing the vnic ctx */
7435 bnxt_hwrm_clear_vnic_rss(bp);
7436 bnxt_hwrm_vnic_ctx_free(bp);
c0c050c5 7437 }
a46ecb11
MC
 7438 /* before freeing the vnic, undo the vnic tpa settings */
7439 if (bp->flags & BNXT_FLAG_TPA)
7440 bnxt_set_tpa(bp, false);
7441 bnxt_hwrm_vnic_free(bp);
7442 if (bp->flags & BNXT_FLAG_CHIP_P5)
7443 bnxt_hwrm_vnic_ctx_free(bp);
7444}
7445
7446static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7447 bool irq_re_init)
7448{
7449 bnxt_clear_vnic(bp);
c0c050c5
MC
7450 bnxt_hwrm_ring_free(bp, close_path);
7451 bnxt_hwrm_ring_grp_free(bp);
7452 if (irq_re_init) {
7453 bnxt_hwrm_stat_ctx_free(bp);
7454 bnxt_hwrm_free_tunnel_ports(bp);
7455 }
7456}
7457
39d8ba2e
MC
7458static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
7459{
7460 struct hwrm_func_cfg_input req = {0};
7461 int rc;
7462
7463 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7464 req.fid = cpu_to_le16(0xffff);
7465 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
7466 if (br_mode == BRIDGE_MODE_VEB)
7467 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
7468 else if (br_mode == BRIDGE_MODE_VEPA)
7469 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
7470 else
7471 return -EINVAL;
7472 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
39d8ba2e
MC
7473 return rc;
7474}
7475
c3480a60
MC
7476static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
7477{
7478 struct hwrm_func_cfg_input req = {0};
7479 int rc;
7480
7481 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
7482 return 0;
7483
7484 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7485 req.fid = cpu_to_le16(0xffff);
7486 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
d4f52de0 7487 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
c3480a60 7488 if (size == 128)
d4f52de0 7489 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
c3480a60
MC
7490
7491 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
c3480a60
MC
7492 return rc;
7493}
7494
7b3af4f7 7495static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
c0c050c5 7496{
ae10ae74 7497 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
c0c050c5
MC
7498 int rc;
7499
ae10ae74
MC
7500 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
7501 goto skip_rss_ctx;
7502
c0c050c5 7503 /* allocate context for vnic */
94ce9caa 7504 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
c0c050c5
MC
7505 if (rc) {
7506 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7507 vnic_id, rc);
7508 goto vnic_setup_err;
7509 }
7510 bp->rsscos_nr_ctxs++;
7511
94ce9caa
PS
7512 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7513 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
7514 if (rc) {
7515 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
7516 vnic_id, rc);
7517 goto vnic_setup_err;
7518 }
7519 bp->rsscos_nr_ctxs++;
7520 }
7521
ae10ae74 7522skip_rss_ctx:
c0c050c5
MC
7523 /* configure default vnic, ring grp */
7524 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7525 if (rc) {
7526 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7527 vnic_id, rc);
7528 goto vnic_setup_err;
7529 }
7530
7531 /* Enable RSS hashing on vnic */
7532 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
7533 if (rc) {
7534 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
7535 vnic_id, rc);
7536 goto vnic_setup_err;
7537 }
7538
7539 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7540 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7541 if (rc) {
7542 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7543 vnic_id, rc);
7544 }
7545 }
7546
7547vnic_setup_err:
7548 return rc;
7549}
7550
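/* Descriptive summary (added): P5-chip variant of VNIC setup - allocate one
 * RSS context per 64 RX rings, program the RSS table, configure the VNIC and
 * enable header-data split when aggregation rings are in use.
 */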
7b3af4f7
MC
7551static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
7552{
7553 int rc, i, nr_ctxs;
7554
7555 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
7556 for (i = 0; i < nr_ctxs; i++) {
7557 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
7558 if (rc) {
7559 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
7560 vnic_id, i, rc);
7561 break;
7562 }
7563 bp->rsscos_nr_ctxs++;
7564 }
7565 if (i < nr_ctxs)
7566 return -ENOMEM;
7567
7568 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
7569 if (rc) {
7570 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
7571 vnic_id, rc);
7572 return rc;
7573 }
7574 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7575 if (rc) {
7576 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7577 vnic_id, rc);
7578 return rc;
7579 }
7580 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7581 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7582 if (rc) {
7583 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7584 vnic_id, rc);
7585 }
7586 }
7587 return rc;
7588}
7589
7590static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7591{
7592 if (bp->flags & BNXT_FLAG_CHIP_P5)
7593 return __bnxt_setup_vnic_p5(bp, vnic_id);
7594 else
7595 return __bnxt_setup_vnic(bp, vnic_id);
7596}
7597
c0c050c5
MC
7598static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
7599{
7600#ifdef CONFIG_RFS_ACCEL
7601 int i, rc = 0;
7602
9b3d15e6
MC
7603 if (bp->flags & BNXT_FLAG_CHIP_P5)
7604 return 0;
7605
c0c050c5 7606 for (i = 0; i < bp->rx_nr_rings; i++) {
ae10ae74 7607 struct bnxt_vnic_info *vnic;
c0c050c5
MC
7608 u16 vnic_id = i + 1;
7609 u16 ring_id = i;
7610
7611 if (vnic_id >= bp->nr_vnics)
7612 break;
7613
ae10ae74
MC
7614 vnic = &bp->vnic_info[vnic_id];
7615 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7616 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7617 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
b81a90d3 7618 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
c0c050c5
MC
7619 if (rc) {
7620 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7621 vnic_id, rc);
7622 break;
7623 }
7624 rc = bnxt_setup_vnic(bp, vnic_id);
7625 if (rc)
7626 break;
7627 }
7628 return rc;
7629#else
7630 return 0;
7631#endif
7632}
7633
17c71ac3
MC
7634/* Allow PF and VF with default VLAN to be in promiscuous mode */
7635static bool bnxt_promisc_ok(struct bnxt *bp)
7636{
7637#ifdef CONFIG_BNXT_SRIOV
7638 if (BNXT_VF(bp) && !bp->vf.vlan)
7639 return false;
7640#endif
7641 return true;
7642}
7643
dc52c6c7
PS
7644static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7645{
7646 unsigned int rc = 0;
7647
7648 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7649 if (rc) {
7650 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7651 rc);
7652 return rc;
7653 }
7654
7655 rc = bnxt_hwrm_vnic_cfg(bp, 1);
7656 if (rc) {
 7657 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
7658 rc);
7659 return rc;
7660 }
7661 return rc;
7662}
7663
b664f008 7664static int bnxt_cfg_rx_mode(struct bnxt *);
7d2837dd 7665static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
b664f008 7666
c0c050c5
MC
7667static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7668{
7d2837dd 7669 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
c0c050c5 7670 int rc = 0;
76595193 7671 unsigned int rx_nr_rings = bp->rx_nr_rings;
c0c050c5
MC
7672
7673 if (irq_re_init) {
7674 rc = bnxt_hwrm_stat_ctx_alloc(bp);
7675 if (rc) {
7676 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7677 rc);
7678 goto err_out;
7679 }
7680 }
7681
7682 rc = bnxt_hwrm_ring_alloc(bp);
7683 if (rc) {
7684 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7685 goto err_out;
7686 }
7687
7688 rc = bnxt_hwrm_ring_grp_alloc(bp);
7689 if (rc) {
7690 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7691 goto err_out;
7692 }
7693
76595193
PS
7694 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7695 rx_nr_rings--;
7696
c0c050c5 7697 /* default vnic 0 */
76595193 7698 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
c0c050c5
MC
7699 if (rc) {
7700 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7701 goto err_out;
7702 }
7703
7704 rc = bnxt_setup_vnic(bp, 0);
7705 if (rc)
7706 goto err_out;
7707
7708 if (bp->flags & BNXT_FLAG_RFS) {
7709 rc = bnxt_alloc_rfs_vnics(bp);
7710 if (rc)
7711 goto err_out;
7712 }
7713
7714 if (bp->flags & BNXT_FLAG_TPA) {
7715 rc = bnxt_set_tpa(bp, true);
7716 if (rc)
7717 goto err_out;
7718 }
7719
7720 if (BNXT_VF(bp))
7721 bnxt_update_vf_mac(bp);
7722
7723 /* Filter for default vnic 0 */
7724 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7725 if (rc) {
7726 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7727 goto err_out;
7728 }
7d2837dd 7729 vnic->uc_filter_count = 1;
c0c050c5 7730
30e33848
MC
7731 vnic->rx_mask = 0;
7732 if (bp->dev->flags & IFF_BROADCAST)
7733 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5 7734
17c71ac3 7735 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7d2837dd
MC
7736 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7737
7738 if (bp->dev->flags & IFF_ALLMULTI) {
7739 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7740 vnic->mc_list_count = 0;
7741 } else {
7742 u32 mask = 0;
7743
7744 bnxt_mc_list_updated(bp, &mask);
7745 vnic->rx_mask |= mask;
7746 }
c0c050c5 7747
b664f008
MC
7748 rc = bnxt_cfg_rx_mode(bp);
7749 if (rc)
c0c050c5 7750 goto err_out;
c0c050c5
MC
7751
7752 rc = bnxt_hwrm_set_coal(bp);
7753 if (rc)
7754 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
dc52c6c7
PS
7755 rc);
7756
7757 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7758 rc = bnxt_setup_nitroa0_vnic(bp);
7759 if (rc)
7760 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7761 rc);
7762 }
c0c050c5 7763
cf6645f8
MC
7764 if (BNXT_VF(bp)) {
7765 bnxt_hwrm_func_qcfg(bp);
7766 netdev_update_features(bp->dev);
7767 }
7768
c0c050c5
MC
7769 return 0;
7770
7771err_out:
7772 bnxt_hwrm_resource_free(bp, 0, true);
7773
7774 return rc;
7775}
7776
7777static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7778{
7779 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7780 return 0;
7781}
7782
7783static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7784{
2247925f 7785 bnxt_init_cp_rings(bp);
c0c050c5
MC
7786 bnxt_init_rx_rings(bp);
7787 bnxt_init_tx_rings(bp);
7788 bnxt_init_ring_grps(bp, irq_re_init);
7789 bnxt_init_vnics(bp);
7790
7791 return bnxt_init_chip(bp, irq_re_init);
7792}
7793
c0c050c5
MC
7794static int bnxt_set_real_num_queues(struct bnxt *bp)
7795{
7796 int rc;
7797 struct net_device *dev = bp->dev;
7798
5f449249
MC
7799 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7800 bp->tx_nr_rings_xdp);
c0c050c5
MC
7801 if (rc)
7802 return rc;
7803
7804 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7805 if (rc)
7806 return rc;
7807
7808#ifdef CONFIG_RFS_ACCEL
45019a18 7809 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 7810 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
7811#endif
7812
7813 return rc;
7814}
7815
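/* Descriptive summary (added): fit the requested RX and TX ring counts into
 * @max.  With shared completion rings each count is simply clamped to @max;
 * otherwise the larger count is trimmed first, one ring at a time, until
 * rx + tx <= max.
 */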
6e6c5a57
MC
7816static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7817 bool shared)
7818{
7819 int _rx = *rx, _tx = *tx;
7820
7821 if (shared) {
7822 *rx = min_t(int, _rx, max);
7823 *tx = min_t(int, _tx, max);
7824 } else {
7825 if (max < 2)
7826 return -ENOMEM;
7827
7828 while (_rx + _tx > max) {
7829 if (_rx > _tx && _rx > 1)
7830 _rx--;
7831 else if (_tx > 1)
7832 _tx--;
7833 }
7834 *rx = _rx;
7835 *tx = _tx;
7836 }
7837 return 0;
7838}
7839
7809592d
MC
7840static void bnxt_setup_msix(struct bnxt *bp)
7841{
7842 const int len = sizeof(bp->irq_tbl[0].name);
7843 struct net_device *dev = bp->dev;
7844 int tcs, i;
7845
7846 tcs = netdev_get_num_tc(dev);
7847 if (tcs > 1) {
d1e7925e 7848 int i, off, count;
7809592d 7849
d1e7925e
MC
7850 for (i = 0; i < tcs; i++) {
7851 count = bp->tx_nr_rings_per_tc;
7852 off = i * count;
7853 netdev_set_tc_queue(dev, i, count, off);
7809592d
MC
7854 }
7855 }
7856
7857 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c 7858 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7809592d
MC
7859 char *attr;
7860
7861 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7862 attr = "TxRx";
7863 else if (i < bp->rx_nr_rings)
7864 attr = "rx";
7865 else
7866 attr = "tx";
7867
e5811b8c
MC
7868 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7869 attr, i);
7870 bp->irq_tbl[map_idx].handler = bnxt_msix;
7809592d
MC
7871 }
7872}
7873
7874static void bnxt_setup_inta(struct bnxt *bp)
7875{
7876 const int len = sizeof(bp->irq_tbl[0].name);
7877
7878 if (netdev_get_num_tc(bp->dev))
7879 netdev_reset_tc(bp->dev);
7880
7881 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7882 0);
7883 bp->irq_tbl[0].handler = bnxt_inta;
7884}
7885
7886static int bnxt_setup_int_mode(struct bnxt *bp)
7887{
7888 int rc;
7889
7890 if (bp->flags & BNXT_FLAG_USING_MSIX)
7891 bnxt_setup_msix(bp);
7892 else
7893 bnxt_setup_inta(bp);
7894
7895 rc = bnxt_set_real_num_queues(bp);
7896 return rc;
7897}
7898
b7429954 7899#ifdef CONFIG_RFS_ACCEL
8079e8f1
MC
7900static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7901{
6a4f2947 7902 return bp->hw_resc.max_rsscos_ctxs;
8079e8f1
MC
7903}
7904
7905static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7906{
6a4f2947 7907 return bp->hw_resc.max_vnics;
8079e8f1 7908}
b7429954 7909#endif
8079e8f1 7910
e4060d30
MC
7911unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7912{
6a4f2947 7913 return bp->hw_resc.max_stat_ctxs;
e4060d30
MC
7914}
7915
7916unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7917{
6a4f2947 7918 return bp->hw_resc.max_cp_rings;
e4060d30
MC
7919}
7920
e916b081 7921static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
a588e458 7922{
c0b8cda0
MC
7923 unsigned int cp = bp->hw_resc.max_cp_rings;
7924
7925 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7926 cp -= bnxt_get_ulp_msix_num(bp);
7927
7928 return cp;
a588e458
MC
7929}
7930
ad95c27b 7931static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7809592d 7932{
6a4f2947
MC
7933 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7934
f7588cd8
MC
7935 if (bp->flags & BNXT_FLAG_CHIP_P5)
7936 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7937
6a4f2947 7938 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7809592d
MC
7939}
7940
30f52947 7941static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
33c2657e 7942{
6a4f2947 7943 bp->hw_resc.max_irqs = max_irqs;
33c2657e
MC
7944}
7945
e916b081
MC
7946unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7947{
7948 unsigned int cp;
7949
7950 cp = bnxt_get_max_func_cp_rings_for_en(bp);
7951 if (bp->flags & BNXT_FLAG_CHIP_P5)
7952 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
7953 else
7954 return cp - bp->cp_nr_rings;
7955}
7956
c027c6b4
VV
7957unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
7958{
d77b1ad8 7959 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
c027c6b4
VV
7960}
7961
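/* Descriptive summary (added): return how many MSI-X vectors can still be
 * handed out (e.g. to the RDMA ULP) on top of the L2 driver's own completion
 * rings, bounded by the vectors already enabled and by the function's max
 * IRQ/completion ring limits under the new resource manager.
 */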
fbcfc8e4
MC
7962int bnxt_get_avail_msix(struct bnxt *bp, int num)
7963{
7964 int max_cp = bnxt_get_max_func_cp_rings(bp);
7965 int max_irq = bnxt_get_max_func_irqs(bp);
7966 int total_req = bp->cp_nr_rings + num;
7967 int max_idx, avail_msix;
7968
75720e63
MC
7969 max_idx = bp->total_irqs;
7970 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7971 max_idx = min_t(int, bp->total_irqs, max_cp);
fbcfc8e4 7972 avail_msix = max_idx - bp->cp_nr_rings;
f1ca94de 7973 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
fbcfc8e4
MC
7974 return avail_msix;
7975
7976 if (max_irq < total_req) {
7977 num = max_irq - bp->cp_nr_rings;
7978 if (num <= 0)
7979 return 0;
7980 }
7981 return num;
7982}
7983
08654eb2
MC
7984static int bnxt_get_num_msix(struct bnxt *bp)
7985{
f1ca94de 7986 if (!BNXT_NEW_RM(bp))
08654eb2
MC
7987 return bnxt_get_max_func_irqs(bp);
7988
c0b8cda0 7989 return bnxt_nq_rings_in_use(bp);
08654eb2
MC
7990}
7991
7809592d 7992static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 7993{
fbcfc8e4 7994 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7809592d 7995 struct msix_entry *msix_ent;
c0c050c5 7996
08654eb2
MC
7997 total_vecs = bnxt_get_num_msix(bp);
7998 max = bnxt_get_max_func_irqs(bp);
7999 if (total_vecs > max)
8000 total_vecs = max;
8001
2773dfb2
MC
8002 if (!total_vecs)
8003 return 0;
8004
c0c050c5
MC
8005 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8006 if (!msix_ent)
8007 return -ENOMEM;
8008
8009 for (i = 0; i < total_vecs; i++) {
8010 msix_ent[i].entry = i;
8011 msix_ent[i].vector = 0;
8012 }
8013
01657bcd
MC
8014 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8015 min = 2;
8016
8017 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
fbcfc8e4
MC
8018 ulp_msix = bnxt_get_ulp_msix_num(bp);
8019 if (total_vecs < 0 || total_vecs < ulp_msix) {
c0c050c5
MC
8020 rc = -ENODEV;
8021 goto msix_setup_exit;
8022 }
8023
8024 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8025 if (bp->irq_tbl) {
7809592d
MC
8026 for (i = 0; i < total_vecs; i++)
8027 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 8028
7809592d 8029 bp->total_irqs = total_vecs;
c0c050c5 8030 /* Trim rings based on the number of vectors allocated */
6e6c5a57 8031 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
fbcfc8e4 8032 total_vecs - ulp_msix, min == 1);
6e6c5a57
MC
8033 if (rc)
8034 goto msix_setup_exit;
8035
7809592d
MC
8036 bp->cp_nr_rings = (min == 1) ?
8037 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8038 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 8039
c0c050c5
MC
8040 } else {
8041 rc = -ENOMEM;
8042 goto msix_setup_exit;
8043 }
8044 bp->flags |= BNXT_FLAG_USING_MSIX;
8045 kfree(msix_ent);
8046 return 0;
8047
8048msix_setup_exit:
7809592d
MC
8049 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8050 kfree(bp->irq_tbl);
8051 bp->irq_tbl = NULL;
c0c050c5
MC
8052 pci_disable_msix(bp->pdev);
8053 kfree(msix_ent);
8054 return rc;
8055}
8056
7809592d 8057static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 8058{
c0c050c5 8059 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
8060 if (!bp->irq_tbl)
8061 return -ENOMEM;
8062
8063 bp->total_irqs = 1;
c0c050c5
MC
8064 bp->rx_nr_rings = 1;
8065 bp->tx_nr_rings = 1;
8066 bp->cp_nr_rings = 1;
01657bcd 8067 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 8068 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 8069 return 0;
c0c050c5
MC
8070}
8071
7809592d 8072static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5
MC
8073{
8074 int rc = 0;
8075
8076 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 8077 rc = bnxt_init_msix(bp);
c0c050c5 8078
1fa72e29 8079 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 8080 /* fallback to INTA */
7809592d 8081 rc = bnxt_init_inta(bp);
c0c050c5
MC
8082 }
8083 return rc;
8084}
8085
7809592d
MC
8086static void bnxt_clear_int_mode(struct bnxt *bp)
8087{
8088 if (bp->flags & BNXT_FLAG_USING_MSIX)
8089 pci_disable_msix(bp->pdev);
8090
8091 kfree(bp->irq_tbl);
8092 bp->irq_tbl = NULL;
8093 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8094}
8095
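/* Descriptive summary (added): re-reserve rings with firmware after the ring
 * configuration has changed.  Under the new resource manager, if the required
 * MSI-X count no longer matches what is enabled, interrupts are torn down and
 * re-initialized around the reservation with the ULP's IRQs stopped and
 * restarted.
 */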
1b3f0b75 8096int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
674f50a5 8097{
674f50a5 8098 int tcs = netdev_get_num_tc(bp->dev);
1b3f0b75 8099 bool irq_cleared = false;
674f50a5
MC
8100 int rc;
8101
8102 if (!bnxt_need_reserve_rings(bp))
8103 return 0;
8104
1b3f0b75
MC
8105 if (irq_re_init && BNXT_NEW_RM(bp) &&
8106 bnxt_get_num_msix(bp) != bp->total_irqs) {
ec86f14e 8107 bnxt_ulp_irq_stop(bp);
674f50a5 8108 bnxt_clear_int_mode(bp);
1b3f0b75 8109 irq_cleared = true;
36d65be9
MC
8110 }
8111 rc = __bnxt_reserve_rings(bp);
1b3f0b75 8112 if (irq_cleared) {
36d65be9
MC
8113 if (!rc)
8114 rc = bnxt_init_int_mode(bp);
ec86f14e 8115 bnxt_ulp_irq_restart(bp, rc);
36d65be9
MC
8116 }
8117 if (rc) {
8118 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8119 return rc;
674f50a5
MC
8120 }
8121 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8122 netdev_err(bp->dev, "tx ring reservation failure\n");
8123 netdev_reset_tc(bp->dev);
8124 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8125 return -ENOMEM;
8126 }
674f50a5
MC
8127 return 0;
8128}
8129
c0c050c5
MC
8130static void bnxt_free_irq(struct bnxt *bp)
8131{
8132 struct bnxt_irq *irq;
8133 int i;
8134
8135#ifdef CONFIG_RFS_ACCEL
8136 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8137 bp->dev->rx_cpu_rmap = NULL;
8138#endif
cb98526b 8139 if (!bp->irq_tbl || !bp->bnapi)
c0c050c5
MC
8140 return;
8141
8142 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
8143 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8144
8145 irq = &bp->irq_tbl[map_idx];
56f0fd80
VV
8146 if (irq->requested) {
8147 if (irq->have_cpumask) {
8148 irq_set_affinity_hint(irq->vector, NULL);
8149 free_cpumask_var(irq->cpu_mask);
8150 irq->have_cpumask = 0;
8151 }
c0c050c5 8152 free_irq(irq->vector, bp->bnapi[i]);
56f0fd80
VV
8153 }
8154
c0c050c5
MC
8155 irq->requested = 0;
8156 }
c0c050c5
MC
8157}
8158
8159static int bnxt_request_irq(struct bnxt *bp)
8160{
b81a90d3 8161 int i, j, rc = 0;
c0c050c5
MC
8162 unsigned long flags = 0;
8163#ifdef CONFIG_RFS_ACCEL
e5811b8c 8164 struct cpu_rmap *rmap;
c0c050c5
MC
8165#endif
8166
e5811b8c
MC
8167 rc = bnxt_setup_int_mode(bp);
8168 if (rc) {
8169 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8170 rc);
8171 return rc;
8172 }
8173#ifdef CONFIG_RFS_ACCEL
8174 rmap = bp->dev->rx_cpu_rmap;
8175#endif
c0c050c5
MC
8176 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8177 flags = IRQF_SHARED;
8178
b81a90d3 8179 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
8180 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8181 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8182
c0c050c5 8183#ifdef CONFIG_RFS_ACCEL
b81a90d3 8184 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
8185 rc = irq_cpu_rmap_add(rmap, irq->vector);
8186 if (rc)
8187 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
b81a90d3
MC
8188 j);
8189 j++;
c0c050c5
MC
8190 }
8191#endif
8192 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8193 bp->bnapi[i]);
8194 if (rc)
8195 break;
8196
8197 irq->requested = 1;
56f0fd80
VV
8198
8199 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8200 int numa_node = dev_to_node(&bp->pdev->dev);
8201
8202 irq->have_cpumask = 1;
8203 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8204 irq->cpu_mask);
8205 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8206 if (rc) {
8207 netdev_warn(bp->dev,
8208 "Set affinity failed, IRQ = %d\n",
8209 irq->vector);
8210 break;
8211 }
8212 }
c0c050c5
MC
8213 }
8214 return rc;
8215}
8216
8217static void bnxt_del_napi(struct bnxt *bp)
8218{
8219 int i;
8220
8221 if (!bp->bnapi)
8222 return;
8223
8224 for (i = 0; i < bp->cp_nr_rings; i++) {
8225 struct bnxt_napi *bnapi = bp->bnapi[i];
8226
8227 napi_hash_del(&bnapi->napi);
8228 netif_napi_del(&bnapi->napi);
8229 }
e5f6f564
ED
8230 /* We called napi_hash_del() before netif_napi_del(), we need
8231 * to respect an RCU grace period before freeing napi structures.
8232 */
8233 synchronize_net();
c0c050c5
MC
8234}
8235
8236static void bnxt_init_napi(struct bnxt *bp)
8237{
8238 int i;
10bbdaf5 8239 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
8240 struct bnxt_napi *bnapi;
8241
8242 if (bp->flags & BNXT_FLAG_USING_MSIX) {
0fcec985
MC
8243 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8244
8245 if (bp->flags & BNXT_FLAG_CHIP_P5)
8246 poll_fn = bnxt_poll_p5;
8247 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10bbdaf5
PS
8248 cp_nr_rings--;
8249 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5 8250 bnapi = bp->bnapi[i];
0fcec985 8251 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
c0c050c5 8252 }
10bbdaf5
PS
8253 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8254 bnapi = bp->bnapi[cp_nr_rings];
8255 netif_napi_add(bp->dev, &bnapi->napi,
8256 bnxt_poll_nitroa0, 64);
10bbdaf5 8257 }
c0c050c5
MC
8258 } else {
8259 bnapi = bp->bnapi[0];
8260 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
c0c050c5
MC
8261 }
8262}
8263
8264static void bnxt_disable_napi(struct bnxt *bp)
8265{
8266 int i;
8267
8268 if (!bp->bnapi)
8269 return;
8270
0bc0b97f
AG
8271 for (i = 0; i < bp->cp_nr_rings; i++) {
8272 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8273
8274 if (bp->bnapi[i]->rx_ring)
8275 cancel_work_sync(&cpr->dim.work);
8276
c0c050c5 8277 napi_disable(&bp->bnapi[i]->napi);
0bc0b97f 8278 }
c0c050c5
MC
8279}
8280
8281static void bnxt_enable_napi(struct bnxt *bp)
8282{
8283 int i;
8284
8285 for (i = 0; i < bp->cp_nr_rings; i++) {
6a8788f2 8286 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
fa7e2812 8287 bp->bnapi[i]->in_reset = false;
6a8788f2
AG
8288
8289 if (bp->bnapi[i]->rx_ring) {
8290 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
c002bd52 8291 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6a8788f2 8292 }
c0c050c5
MC
8293 napi_enable(&bp->bnapi[i]->napi);
8294 }
8295}
8296
7df4ae9f 8297void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
8298{
8299 int i;
c0c050c5 8300 struct bnxt_tx_ring_info *txr;
c0c050c5 8301
b6ab4b01 8302 if (bp->tx_ring) {
c0c050c5 8303 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 8304 txr = &bp->tx_ring[i];
c0c050c5 8305 txr->dev_state = BNXT_DEV_STATE_CLOSING;
c0c050c5
MC
8306 }
8307 }
8308 /* Stop all TX queues */
8309 netif_tx_disable(bp->dev);
8310 netif_carrier_off(bp->dev);
8311}
8312
7df4ae9f 8313void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
8314{
8315 int i;
c0c050c5 8316 struct bnxt_tx_ring_info *txr;
c0c050c5
MC
8317
8318 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 8319 txr = &bp->tx_ring[i];
c0c050c5
MC
8320 txr->dev_state = 0;
8321 }
8322 netif_tx_wake_all_queues(bp->dev);
8323 if (bp->link_info.link_up)
8324 netif_carrier_on(bp->dev);
8325}
8326
8327static void bnxt_report_link(struct bnxt *bp)
8328{
8329 if (bp->link_info.link_up) {
8330 const char *duplex;
8331 const char *flow_ctrl;
38a21b34
DK
8332 u32 speed;
8333 u16 fec;
c0c050c5
MC
8334
8335 netif_carrier_on(bp->dev);
8336 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
8337 duplex = "full";
8338 else
8339 duplex = "half";
8340 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
8341 flow_ctrl = "ON - receive & transmit";
8342 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
8343 flow_ctrl = "ON - transmit";
8344 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
8345 flow_ctrl = "ON - receive";
8346 else
8347 flow_ctrl = "none";
8348 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
38a21b34 8349 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
c0c050c5 8350 speed, duplex, flow_ctrl);
170ce013
MC
8351 if (bp->flags & BNXT_FLAG_EEE_CAP)
8352 netdev_info(bp->dev, "EEE is %s\n",
8353 bp->eee.eee_active ? "active" :
8354 "not active");
e70c752f
MC
8355 fec = bp->link_info.fec_cfg;
8356 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
8357 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
8358 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
8359 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
8360 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
c0c050c5
MC
8361 } else {
8362 netif_carrier_off(bp->dev);
8363 netdev_err(bp->dev, "NIC Link is Down\n");
8364 }
8365}
8366
170ce013
MC
8367static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
8368{
8369 int rc = 0;
8370 struct hwrm_port_phy_qcaps_input req = {0};
8371 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
93ed8117 8372 struct bnxt_link_info *link_info = &bp->link_info;
170ce013 8373
ba642ab7
MC
8374 bp->flags &= ~BNXT_FLAG_EEE_CAP;
8375 if (bp->test_info)
8376 bp->test_info->flags &= ~BNXT_TEST_FL_EXT_LPBK;
170ce013
MC
8377 if (bp->hwrm_spec_code < 0x10201)
8378 return 0;
8379
8380 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
8381
8382 mutex_lock(&bp->hwrm_cmd_lock);
8383 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8384 if (rc)
8385 goto hwrm_phy_qcaps_exit;
8386
acb20054 8387 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
170ce013
MC
8388 struct ethtool_eee *eee = &bp->eee;
8389 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
8390
8391 bp->flags |= BNXT_FLAG_EEE_CAP;
8392 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8393 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
8394 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
8395 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
8396 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
8397 }
55fd0cf3
MC
8398 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
8399 if (bp->test_info)
8400 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
8401 }
520ad89a
MC
8402 if (resp->supported_speeds_auto_mode)
8403 link_info->support_auto_speeds =
8404 le16_to_cpu(resp->supported_speeds_auto_mode);
170ce013 8405
d5430d31
MC
8406 bp->port_count = resp->port_cnt;
8407
170ce013
MC
8408hwrm_phy_qcaps_exit:
8409 mutex_unlock(&bp->hwrm_cmd_lock);
8410 return rc;
8411}
8412
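/* Descriptive summary (added): refresh the cached link state from
 * HWRM_PORT_PHY_QCFG - speed, duplex, pause, supported/advertised speeds,
 * EEE and FEC settings.  When @chng_link_state is set, link_up is updated
 * and changes are reported.  On a single PF, advertised speeds that are no
 * longer supported are pruned and, if autoneg is enabled, the link settings
 * are re-applied.
 */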
c0c050c5
MC
8413static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
8414{
8415 int rc = 0;
8416 struct bnxt_link_info *link_info = &bp->link_info;
8417 struct hwrm_port_phy_qcfg_input req = {0};
8418 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8419 u8 link_up = link_info->link_up;
286ef9d6 8420 u16 diff;
c0c050c5
MC
8421
8422 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
8423
8424 mutex_lock(&bp->hwrm_cmd_lock);
8425 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8426 if (rc) {
8427 mutex_unlock(&bp->hwrm_cmd_lock);
8428 return rc;
8429 }
8430
8431 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
8432 link_info->phy_link_status = resp->link;
acb20054
MC
8433 link_info->duplex = resp->duplex_cfg;
8434 if (bp->hwrm_spec_code >= 0x10800)
8435 link_info->duplex = resp->duplex_state;
c0c050c5
MC
8436 link_info->pause = resp->pause;
8437 link_info->auto_mode = resp->auto_mode;
8438 link_info->auto_pause_setting = resp->auto_pause;
3277360e 8439 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 8440 link_info->force_pause_setting = resp->force_pause;
acb20054 8441 link_info->duplex_setting = resp->duplex_cfg;
c0c050c5
MC
8442 if (link_info->phy_link_status == BNXT_LINK_LINK)
8443 link_info->link_speed = le16_to_cpu(resp->link_speed);
8444 else
8445 link_info->link_speed = 0;
8446 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
c0c050c5
MC
8447 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
8448 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
3277360e
MC
8449 link_info->lp_auto_link_speeds =
8450 le16_to_cpu(resp->link_partner_adv_speeds);
c0c050c5
MC
8451 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
8452 link_info->phy_ver[0] = resp->phy_maj;
8453 link_info->phy_ver[1] = resp->phy_min;
8454 link_info->phy_ver[2] = resp->phy_bld;
8455 link_info->media_type = resp->media_type;
03efbec0 8456 link_info->phy_type = resp->phy_type;
11f15ed3 8457 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
8458 link_info->phy_addr = resp->eee_config_phy_addr &
8459 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 8460 link_info->module_status = resp->module_status;
170ce013
MC
8461
8462 if (bp->flags & BNXT_FLAG_EEE_CAP) {
8463 struct ethtool_eee *eee = &bp->eee;
8464 u16 fw_speeds;
8465
8466 eee->eee_active = 0;
8467 if (resp->eee_config_phy_addr &
8468 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
8469 eee->eee_active = 1;
8470 fw_speeds = le16_to_cpu(
8471 resp->link_partner_adv_eee_link_speed_mask);
8472 eee->lp_advertised =
8473 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8474 }
8475
8476 /* Pull initial EEE config */
8477 if (!chng_link_state) {
8478 if (resp->eee_config_phy_addr &
8479 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
8480 eee->eee_enabled = 1;
c0c050c5 8481
170ce013
MC
8482 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
8483 eee->advertised =
8484 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8485
8486 if (resp->eee_config_phy_addr &
8487 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
8488 __le32 tmr;
8489
8490 eee->tx_lpi_enabled = 1;
8491 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
8492 eee->tx_lpi_timer = le32_to_cpu(tmr) &
8493 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
8494 }
8495 }
8496 }
e70c752f
MC
8497
8498 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8499 if (bp->hwrm_spec_code >= 0x10504)
8500 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8501
c0c050c5
MC
8502 /* TODO: need to add more logic to report VF link */
8503 if (chng_link_state) {
8504 if (link_info->phy_link_status == BNXT_LINK_LINK)
8505 link_info->link_up = 1;
8506 else
8507 link_info->link_up = 0;
8508 if (link_up != link_info->link_up)
8509 bnxt_report_link(bp);
8510 } else {
8511 /* alwasy link down if not require to update link state */
8512 link_info->link_up = 0;
8513 }
8514 mutex_unlock(&bp->hwrm_cmd_lock);
286ef9d6 8515
dac04907
MC
8516 if (!BNXT_SINGLE_PF(bp))
8517 return 0;
8518
286ef9d6
MC
8519 diff = link_info->support_auto_speeds ^ link_info->advertising;
8520 if ((link_info->support_auto_speeds | diff) !=
8521 link_info->support_auto_speeds) {
8522 /* An advertised speed is no longer supported, so we need to
0eaa24b9
MC
8523 * update the advertisement settings. Caller holds RTNL
8524 * so we can modify link settings.
286ef9d6 8525 */
286ef9d6 8526 link_info->advertising = link_info->support_auto_speeds;
0eaa24b9 8527 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
286ef9d6 8528 bnxt_hwrm_set_link_setting(bp, true, false);
286ef9d6 8529 }
c0c050c5
MC
8530 return 0;
8531}
8532
10289bec
MC
8533static void bnxt_get_port_module_status(struct bnxt *bp)
8534{
8535 struct bnxt_link_info *link_info = &bp->link_info;
8536 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
8537 u8 module_status;
8538
8539 if (bnxt_update_link(bp, true))
8540 return;
8541
8542 module_status = link_info->module_status;
8543 switch (module_status) {
8544 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
8545 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
8546 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
8547 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
8548 bp->pf.port_id);
8549 if (bp->hwrm_spec_code >= 0x10201) {
8550 netdev_warn(bp->dev, "Module part number %s\n",
8551 resp->phy_vendor_partnumber);
8552 }
8553 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
8554 netdev_warn(bp->dev, "TX is disabled\n");
8555 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
8556 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
8557 }
8558}
8559
c0c050c5
MC
8560static void
8561bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
8562{
8563 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
8564 if (bp->hwrm_spec_code >= 0x10201)
8565 req->auto_pause =
8566 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
8567 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8568 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
8569 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 8570 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
8571 req->enables |=
8572 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8573 } else {
8574 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8575 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
8576 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8577 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
8578 req->enables |=
8579 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
8580 if (bp->hwrm_spec_code >= 0x10201) {
8581 req->auto_pause = req->force_pause;
8582 req->enables |= cpu_to_le32(
8583 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8584 }
c0c050c5
MC
8585 }
8586}
8587
8588static void bnxt_hwrm_set_link_common(struct bnxt *bp,
8589 struct hwrm_port_phy_cfg_input *req)
8590{
8591 u8 autoneg = bp->link_info.autoneg;
8592 u16 fw_link_speed = bp->link_info.req_link_speed;
68515a18 8593 u16 advertising = bp->link_info.advertising;
c0c050c5
MC
8594
8595 if (autoneg & BNXT_AUTONEG_SPEED) {
8596 req->auto_mode |=
11f15ed3 8597 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
c0c050c5
MC
8598
8599 req->enables |= cpu_to_le32(
8600 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
8601 req->auto_link_speed_mask = cpu_to_le16(advertising);
8602
8603 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
8604 req->flags |=
8605 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
8606 } else {
8607 req->force_link_speed = cpu_to_le16(fw_link_speed);
8608 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
8609 }
8610
c0c050c5
MC
8611 /* tell chimp that the setting takes effect immediately */
8612 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
8613}
8614
8615int bnxt_hwrm_set_pause(struct bnxt *bp)
8616{
8617 struct hwrm_port_phy_cfg_input req = {0};
8618 int rc;
8619
8620 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8621 bnxt_hwrm_set_pause_common(bp, &req);
8622
8623 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8624 bp->link_info.force_link_chng)
8625 bnxt_hwrm_set_link_common(bp, &req);
8626
8627 mutex_lock(&bp->hwrm_cmd_lock);
8628 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8629 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
 8630 /* since changing the pause setting doesn't trigger any link
 8631 * change event, the driver needs to update the current pause
 8632 * result upon successful return of the phy_cfg command
8633 */
8634 bp->link_info.pause =
8635 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8636 bp->link_info.auto_pause_setting = 0;
8637 if (!bp->link_info.force_link_chng)
8638 bnxt_report_link(bp);
8639 }
8640 bp->link_info.force_link_chng = false;
8641 mutex_unlock(&bp->hwrm_cmd_lock);
8642 return rc;
8643}
8644
939f7f0c
MC
8645static void bnxt_hwrm_set_eee(struct bnxt *bp,
8646 struct hwrm_port_phy_cfg_input *req)
8647{
8648 struct ethtool_eee *eee = &bp->eee;
8649
8650 if (eee->eee_enabled) {
8651 u16 eee_speeds;
8652 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8653
8654 if (eee->tx_lpi_enabled)
8655 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8656 else
8657 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8658
8659 req->flags |= cpu_to_le32(flags);
8660 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8661 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8662 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8663 } else {
8664 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8665 }
8666}
8667
8668int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5
MC
8669{
8670 struct hwrm_port_phy_cfg_input req = {0};
8671
8672 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8673 if (set_pause)
8674 bnxt_hwrm_set_pause_common(bp, &req);
8675
8676 bnxt_hwrm_set_link_common(bp, &req);
939f7f0c
MC
8677
8678 if (set_eee)
8679 bnxt_hwrm_set_eee(bp, &req);
c0c050c5
MC
8680 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8681}
8682
33f7d55f
MC
8683static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8684{
8685 struct hwrm_port_phy_cfg_input req = {0};
8686
567b2abe 8687 if (!BNXT_SINGLE_PF(bp))
33f7d55f
MC
8688 return 0;
8689
8690 if (pci_num_vf(bp->pdev))
8691 return 0;
8692
8693 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
16d663a6 8694 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
33f7d55f
MC
8695 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8696}
8697
ec5d31e3
MC
8698static int bnxt_fw_init_one(struct bnxt *bp);
8699
25e1acd6
MC
8700static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8701{
8702 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8703 struct hwrm_func_drv_if_change_input req = {0};
ec5d31e3
MC
8704 bool resc_reinit = false, fw_reset = false;
8705 u32 flags = 0;
25e1acd6
MC
8706 int rc;
8707
8708 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8709 return 0;
8710
8711 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8712 if (up)
8713 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8714 mutex_lock(&bp->hwrm_cmd_lock);
8715 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
ec5d31e3
MC
8716 if (!rc)
8717 flags = le32_to_cpu(resp->flags);
25e1acd6 8718 mutex_unlock(&bp->hwrm_cmd_lock);
ec5d31e3
MC
8719 if (rc)
8720 return rc;
25e1acd6 8721
ec5d31e3
MC
8722 if (!up)
8723 return 0;
25e1acd6 8724
ec5d31e3
MC
8725 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
8726 resc_reinit = true;
8727 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
8728 fw_reset = true;
8729
3bc7d4a3
MC
8730 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
8731 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
8732 return -ENODEV;
8733 }
ec5d31e3
MC
8734 if (resc_reinit || fw_reset) {
8735 if (fw_reset) {
8736 rc = bnxt_fw_init_one(bp);
8737 if (rc) {
8738 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
8739 return rc;
8740 }
8741 bnxt_clear_int_mode(bp);
8742 rc = bnxt_init_int_mode(bp);
8743 if (rc) {
8744 netdev_err(bp->dev, "init int mode failed\n");
8745 return rc;
8746 }
8747 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
8748 }
8749 if (BNXT_NEW_RM(bp)) {
8750 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8751
8752 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
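 /* Clear the cached reservation counts so that rings and other
 * resources get re-reserved against the updated firmware resources.
 */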
8753 hw_resc->resv_cp_rings = 0;
8754 hw_resc->resv_stat_ctxs = 0;
8755 hw_resc->resv_irqs = 0;
8756 hw_resc->resv_tx_rings = 0;
8757 hw_resc->resv_rx_rings = 0;
8758 hw_resc->resv_hw_ring_grps = 0;
8759 hw_resc->resv_vnics = 0;
8760 if (!fw_reset) {
8761 bp->tx_nr_rings = 0;
8762 bp->rx_nr_rings = 0;
8763 }
8764 }
25e1acd6 8765 }
ec5d31e3 8766 return 0;
25e1acd6
MC
8767}
8768
5ad2cbee
MC
8769static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8770{
8771 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8772 struct hwrm_port_led_qcaps_input req = {0};
8773 struct bnxt_pf_info *pf = &bp->pf;
8774 int rc;
8775
ba642ab7 8776 bp->num_leds = 0;
5ad2cbee
MC
8777 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8778 return 0;
8779
8780 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8781 req.port_id = cpu_to_le16(pf->port_id);
8782 mutex_lock(&bp->hwrm_cmd_lock);
8783 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8784 if (rc) {
8785 mutex_unlock(&bp->hwrm_cmd_lock);
8786 return rc;
8787 }
8788 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8789 int i;
8790
8791 bp->num_leds = resp->num_leds;
8792 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8793 bp->num_leds);
8794 for (i = 0; i < bp->num_leds; i++) {
8795 struct bnxt_led_info *led = &bp->leds[i];
8796 __le16 caps = led->led_state_caps;
8797
8798 if (!led->led_group_id ||
8799 !BNXT_LED_ALT_BLINK_CAP(caps)) {
8800 bp->num_leds = 0;
8801 break;
8802 }
8803 }
8804 }
8805 mutex_unlock(&bp->hwrm_cmd_lock);
8806 return 0;
8807}
8808
5282db6c
MC
8809int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8810{
8811 struct hwrm_wol_filter_alloc_input req = {0};
8812 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8813 int rc;
8814
8815 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8816 req.port_id = cpu_to_le16(bp->pf.port_id);
8817 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8818 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8819 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8820 mutex_lock(&bp->hwrm_cmd_lock);
8821 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8822 if (!rc)
8823 bp->wol_filter_id = resp->wol_filter_id;
8824 mutex_unlock(&bp->hwrm_cmd_lock);
8825 return rc;
8826}
8827
8828int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8829{
8830 struct hwrm_wol_filter_free_input req = {0};
8831 int rc;
8832
8833 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8834 req.port_id = cpu_to_le16(bp->pf.port_id);
8835 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8836 req.wol_filter_id = bp->wol_filter_id;
8837 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8838 return rc;
8839}
8840
c1ef146a
MC
8841static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8842{
8843 struct hwrm_wol_filter_qcfg_input req = {0};
8844 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8845 u16 next_handle = 0;
8846 int rc;
8847
8848 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8849 req.port_id = cpu_to_le16(bp->pf.port_id);
8850 req.handle = cpu_to_le16(handle);
8851 mutex_lock(&bp->hwrm_cmd_lock);
8852 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8853 if (!rc) {
8854 next_handle = le16_to_cpu(resp->next_handle);
8855 if (next_handle != 0) {
8856 if (resp->wol_type ==
8857 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8858 bp->wol = 1;
8859 bp->wol_filter_id = resp->wol_filter_id;
8860 }
8861 }
8862 }
8863 mutex_unlock(&bp->hwrm_cmd_lock);
8864 return next_handle;
8865}
8866
8867static void bnxt_get_wol_settings(struct bnxt *bp)
8868{
8869 u16 handle = 0;
8870
ba642ab7 8871 bp->wol = 0;
c1ef146a
MC
8872 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8873 return;
8874
8875 do {
8876 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8877 } while (handle && handle != 0xffff);
8878}
8879
cde49a42
VV
8880#ifdef CONFIG_BNXT_HWMON
8881static ssize_t bnxt_show_temp(struct device *dev,
8882 struct device_attribute *devattr, char *buf)
8883{
8884 struct hwrm_temp_monitor_query_input req = {0};
8885 struct hwrm_temp_monitor_query_output *resp;
8886 struct bnxt *bp = dev_get_drvdata(dev);
8887 u32 temp = 0;
8888
8889 resp = bp->hwrm_cmd_resp_addr;
8890 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8891 mutex_lock(&bp->hwrm_cmd_lock);
8892 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8893 temp = resp->temp * 1000; /* display millidegree */
8894 mutex_unlock(&bp->hwrm_cmd_lock);
8895
8896 return sprintf(buf, "%u\n", temp);
8897}
8898static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8899
8900static struct attribute *bnxt_attrs[] = {
8901 &sensor_dev_attr_temp1_input.dev_attr.attr,
8902 NULL
8903};
8904ATTRIBUTE_GROUPS(bnxt);
8905
8906static void bnxt_hwmon_close(struct bnxt *bp)
8907{
8908 if (bp->hwmon_dev) {
8909 hwmon_device_unregister(bp->hwmon_dev);
8910 bp->hwmon_dev = NULL;
8911 }
8912}
8913
8914static void bnxt_hwmon_open(struct bnxt *bp)
8915{
8916 struct pci_dev *pdev = bp->pdev;
8917
ba642ab7
MC
8918 if (bp->hwmon_dev)
8919 return;
8920
cde49a42
VV
8921 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8922 DRV_MODULE_NAME, bp,
8923 bnxt_groups);
8924 if (IS_ERR(bp->hwmon_dev)) {
8925 bp->hwmon_dev = NULL;
8926 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8927 }
8928}
8929#else
8930static void bnxt_hwmon_close(struct bnxt *bp)
8931{
8932}
8933
8934static void bnxt_hwmon_open(struct bnxt *bp)
8935{
8936}
8937#endif
8938
939f7f0c
MC
8939static bool bnxt_eee_config_ok(struct bnxt *bp)
8940{
8941 struct ethtool_eee *eee = &bp->eee;
8942 struct bnxt_link_info *link_info = &bp->link_info;
8943
8944 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
8945 return true;
8946
8947 if (eee->eee_enabled) {
8948 u32 advertising =
8949 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
8950
8951 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8952 eee->eee_enabled = 0;
8953 return false;
8954 }
8955 if (eee->advertised & ~advertising) {
8956 eee->advertised = advertising & eee->supported;
8957 return false;
8958 }
8959 }
8960 return true;
8961}
8962
c0c050c5
MC
8963static int bnxt_update_phy_setting(struct bnxt *bp)
8964{
8965 int rc;
8966 bool update_link = false;
8967 bool update_pause = false;
939f7f0c 8968 bool update_eee = false;
c0c050c5
MC
8969 struct bnxt_link_info *link_info = &bp->link_info;
8970
8971 rc = bnxt_update_link(bp, true);
8972 if (rc) {
8973 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
8974 rc);
8975 return rc;
8976 }
33dac24a
MC
8977 if (!BNXT_SINGLE_PF(bp))
8978 return 0;
8979
c0c050c5 8980 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
c9ee9516
MC
8981 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
8982 link_info->req_flow_ctrl)
c0c050c5
MC
8983 update_pause = true;
8984 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8985 link_info->force_pause_setting != link_info->req_flow_ctrl)
8986 update_pause = true;
c0c050c5
MC
8987 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8988 if (BNXT_AUTO_MODE(link_info->auto_mode))
8989 update_link = true;
8990 if (link_info->req_link_speed != link_info->force_link_speed)
8991 update_link = true;
de73018f
MC
8992 if (link_info->req_duplex != link_info->duplex_setting)
8993 update_link = true;
c0c050c5
MC
8994 } else {
8995 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
8996 update_link = true;
8997 if (link_info->advertising != link_info->auto_link_speeds)
8998 update_link = true;
c0c050c5
MC
8999 }
9000
16d663a6
MC
 9001 /* The last close may have shut down the link, so we need to call
9002 * PHY_CFG to bring it back up.
9003 */
9004 if (!netif_carrier_ok(bp->dev))
9005 update_link = true;
9006
939f7f0c
MC
9007 if (!bnxt_eee_config_ok(bp))
9008 update_eee = true;
9009
c0c050c5 9010 if (update_link)
939f7f0c 9011 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
c0c050c5
MC
9012 else if (update_pause)
9013 rc = bnxt_hwrm_set_pause(bp);
9014 if (rc) {
9015 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9016 rc);
9017 return rc;
9018 }
9019
9020 return rc;
9021}
9022
11809490
JH
 9023/* Common routine to pre-map a certain register block to a different GRC window.
 9024 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
 9025 * in the PF and 3 windows in the VF can be customized to map different
 9026 * register blocks.
9027 */
9028static void bnxt_preset_reg_win(struct bnxt *bp)
9029{
9030 if (BNXT_PF(bp)) {
9031 /* CAG registers map to GRC window #4 */
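 /* Assuming 4-byte window-mapping registers with window #1 at
 * offset 0, the +12 below selects window #4.
 */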
9032 writel(BNXT_CAG_REG_BASE,
9033 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9034 }
9035}
9036
47558acd
MC
9037static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9038
c0c050c5
MC
9039static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9040{
9041 int rc = 0;
9042
11809490 9043 bnxt_preset_reg_win(bp);
c0c050c5
MC
9044 netif_carrier_off(bp->dev);
9045 if (irq_re_init) {
47558acd
MC
9046 /* Reserve rings now if none were reserved at driver probe. */
9047 rc = bnxt_init_dflt_ring_mode(bp);
9048 if (rc) {
9049 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9050 return rc;
9051 }
c0c050c5 9052 }
1b3f0b75 9053 rc = bnxt_reserve_rings(bp, irq_re_init);
41e8d798
MC
9054 if (rc)
9055 return rc;
c0c050c5
MC
9056 if ((bp->flags & BNXT_FLAG_RFS) &&
9057 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9058 /* disable RFS if falling back to INTA */
9059 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9060 bp->flags &= ~BNXT_FLAG_RFS;
9061 }
9062
9063 rc = bnxt_alloc_mem(bp, irq_re_init);
9064 if (rc) {
9065 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9066 goto open_err_free_mem;
9067 }
9068
9069 if (irq_re_init) {
9070 bnxt_init_napi(bp);
9071 rc = bnxt_request_irq(bp);
9072 if (rc) {
9073 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
c58387ab 9074 goto open_err_irq;
c0c050c5
MC
9075 }
9076 }
9077
9078 bnxt_enable_napi(bp);
cabfb09d 9079 bnxt_debug_dev_init(bp);
c0c050c5
MC
9080
9081 rc = bnxt_init_nic(bp, irq_re_init);
9082 if (rc) {
9083 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9084 goto open_err;
9085 }
9086
9087 if (link_re_init) {
e2dc9b6e 9088 mutex_lock(&bp->link_lock);
c0c050c5 9089 rc = bnxt_update_phy_setting(bp);
e2dc9b6e 9090 mutex_unlock(&bp->link_lock);
a1ef4a79 9091 if (rc) {
ba41d46f 9092 netdev_warn(bp->dev, "failed to update phy settings\n");
a1ef4a79
MC
9093 if (BNXT_SINGLE_PF(bp)) {
9094 bp->link_info.phy_retry = true;
9095 bp->link_info.phy_retry_expires =
9096 jiffies + 5 * HZ;
9097 }
9098 }
c0c050c5
MC
9099 }
9100
7cdd5fc3 9101 if (irq_re_init)
ad51b8e9 9102 udp_tunnel_get_rx_info(bp->dev);
c0c050c5 9103
caefe526 9104 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
9105 bnxt_enable_int(bp);
9106 /* Enable TX queues */
9107 bnxt_tx_enable(bp);
9108 mod_timer(&bp->timer, jiffies + bp->current_interval);
10289bec
MC
9109 /* Poll link status and check for SFP+ module status */
9110 bnxt_get_port_module_status(bp);
c0c050c5 9111
ee5c7fb3
SP
9112 /* VF-reps may need to be re-opened after the PF is re-opened */
9113 if (BNXT_PF(bp))
9114 bnxt_vf_reps_open(bp);
c0c050c5
MC
9115 return 0;
9116
9117open_err:
cabfb09d 9118 bnxt_debug_dev_exit(bp);
c0c050c5 9119 bnxt_disable_napi(bp);
c58387ab
VG
9120
9121open_err_irq:
c0c050c5
MC
9122 bnxt_del_napi(bp);
9123
9124open_err_free_mem:
9125 bnxt_free_skbs(bp);
9126 bnxt_free_irq(bp);
9127 bnxt_free_mem(bp, true);
9128 return rc;
9129}
9130
9131/* rtnl_lock held */
9132int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9133{
9134 int rc = 0;
9135
9136 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9137 if (rc) {
9138 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9139 dev_close(bp->dev);
9140 }
9141 return rc;
9142}
9143
f7dc1ea6
MC
 9144/* rtnl_lock held, open the NIC halfway by allocating all resources, but
9145 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
9146 * self tests.
9147 */
9148int bnxt_half_open_nic(struct bnxt *bp)
9149{
9150 int rc = 0;
9151
9152 rc = bnxt_alloc_mem(bp, false);
9153 if (rc) {
9154 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9155 goto half_open_err;
9156 }
9157 rc = bnxt_init_nic(bp, false);
9158 if (rc) {
9159 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9160 goto half_open_err;
9161 }
9162 return 0;
9163
9164half_open_err:
9165 bnxt_free_skbs(bp);
9166 bnxt_free_mem(bp, false);
9167 dev_close(bp->dev);
9168 return rc;
9169}
9170
9171/* rtnl_lock held, this call can only be made after a previous successful
9172 * call to bnxt_half_open_nic().
9173 */
9174void bnxt_half_close_nic(struct bnxt *bp)
9175{
9176 bnxt_hwrm_resource_free(bp, false, false);
9177 bnxt_free_skbs(bp);
9178 bnxt_free_mem(bp, false);
9179}
9180
c0c050c5
MC
9181static int bnxt_open(struct net_device *dev)
9182{
9183 struct bnxt *bp = netdev_priv(dev);
25e1acd6 9184 int rc;
c0c050c5 9185
ec5d31e3
MC
9186 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9187 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9188 return -ENODEV;
9189 }
9190
9191 rc = bnxt_hwrm_if_change(bp, true);
25e1acd6 9192 if (rc)
ec5d31e3
MC
9193 return rc;
9194 rc = __bnxt_open_nic(bp, true, true);
9195 if (rc) {
25e1acd6 9196 bnxt_hwrm_if_change(bp, false);
ec5d31e3
MC
9197 } else {
9198 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state) &&
9199 BNXT_PF(bp)) {
9200 struct bnxt_pf_info *pf = &bp->pf;
9201 int n = pf->active_vfs;
cde49a42 9202
ec5d31e3
MC
9203 if (n)
9204 bnxt_cfg_hw_sriov(bp, &n);
9205 }
9206 bnxt_hwmon_open(bp);
9207 }
cde49a42 9208
25e1acd6 9209 return rc;
c0c050c5
MC
9210}
9211
f9b76ebd
MC
9212static bool bnxt_drv_busy(struct bnxt *bp)
9213{
9214 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
9215 test_bit(BNXT_STATE_READ_STATS, &bp->state));
9216}
9217
b8875ca3
MC
9218static void bnxt_get_ring_stats(struct bnxt *bp,
9219 struct rtnl_link_stats64 *stats);
9220
86e953db
MC
9221static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
9222 bool link_re_init)
c0c050c5 9223{
ee5c7fb3
SP
9224 /* Close the VF-reps before closing PF */
9225 if (BNXT_PF(bp))
9226 bnxt_vf_reps_close(bp);
86e953db 9227
c0c050c5
MC
9228 /* Change device state to avoid TX queue wake up's */
9229 bnxt_tx_disable(bp);
9230
caefe526 9231 clear_bit(BNXT_STATE_OPEN, &bp->state);
4cebdcec 9232 smp_mb__after_atomic();
f9b76ebd 9233 while (bnxt_drv_busy(bp))
4cebdcec 9234 msleep(20);
c0c050c5 9235
9d8bc097 9236 /* Flush rings and disable interrupts */
c0c050c5
MC
9237 bnxt_shutdown_nic(bp, irq_re_init);
9238
9239 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
9240
cabfb09d 9241 bnxt_debug_dev_exit(bp);
c0c050c5 9242 bnxt_disable_napi(bp);
c0c050c5 9243 del_timer_sync(&bp->timer);
3bc7d4a3
MC
9244 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
9245 pci_is_enabled(bp->pdev))
9246 pci_disable_device(bp->pdev);
9247
c0c050c5
MC
9248 bnxt_free_skbs(bp);
9249
b8875ca3
MC
9250 /* Save ring stats before shutdown */
9251 if (bp->bnapi)
9252 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
c0c050c5
MC
9253 if (irq_re_init) {
9254 bnxt_free_irq(bp);
9255 bnxt_del_napi(bp);
9256 }
9257 bnxt_free_mem(bp, irq_re_init);
86e953db
MC
9258}
9259
9260int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9261{
9262 int rc = 0;
9263
3bc7d4a3
MC
9264 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9265 /* If we get here, it means firmware reset is in progress
9266 * while we are trying to close. We can safely proceed with
9267 * the close because we are holding rtnl_lock(). Some firmware
9268 * messages may fail as we proceed to close. We set the
9269 * ABORT_ERR flag here so that the FW reset thread will later
9270 * abort when it gets the rtnl_lock() and sees the flag.
9271 */
9272 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
9273 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9274 }
9275
86e953db
MC
9276#ifdef CONFIG_BNXT_SRIOV
9277 if (bp->sriov_cfg) {
9278 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
9279 !bp->sriov_cfg,
9280 BNXT_SRIOV_CFG_WAIT_TMO);
9281 if (rc)
9282 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
9283 }
9284#endif
9285 __bnxt_close_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
9286 return rc;
9287}
9288
9289static int bnxt_close(struct net_device *dev)
9290{
9291 struct bnxt *bp = netdev_priv(dev);
9292
cde49a42 9293 bnxt_hwmon_close(bp);
c0c050c5 9294 bnxt_close_nic(bp, true, true);
33f7d55f 9295 bnxt_hwrm_shutdown_link(bp);
25e1acd6 9296 bnxt_hwrm_if_change(bp, false);
c0c050c5
MC
9297 return 0;
9298}
9299
0ca12be9
VV
9300static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
9301 u16 *val)
9302{
9303 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
9304 struct hwrm_port_phy_mdio_read_input req = {0};
9305 int rc;
9306
9307 if (bp->hwrm_spec_code < 0x10a00)
9308 return -EOPNOTSUPP;
9309
9310 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
9311 req.port_id = cpu_to_le16(bp->pf.port_id);
9312 req.phy_addr = phy_addr;
9313 req.reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 9314 if (mdio_phy_id_is_c45(phy_addr)) {
0ca12be9
VV
9315 req.cl45_mdio = 1;
9316 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9317 req.dev_addr = mdio_phy_id_devad(phy_addr);
9318 req.reg_addr = cpu_to_le16(reg);
9319 }
9320
9321 mutex_lock(&bp->hwrm_cmd_lock);
9322 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9323 if (!rc)
9324 *val = le16_to_cpu(resp->reg_data);
9325 mutex_unlock(&bp->hwrm_cmd_lock);
9326 return rc;
9327}
9328
9329static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
9330 u16 val)
9331{
9332 struct hwrm_port_phy_mdio_write_input req = {0};
9333
9334 if (bp->hwrm_spec_code < 0x10a00)
9335 return -EOPNOTSUPP;
9336
9337 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
9338 req.port_id = cpu_to_le16(bp->pf.port_id);
9339 req.phy_addr = phy_addr;
9340 req.reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 9341 if (mdio_phy_id_is_c45(phy_addr)) {
0ca12be9
VV
9342 req.cl45_mdio = 1;
9343 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9344 req.dev_addr = mdio_phy_id_devad(phy_addr);
9345 req.reg_addr = cpu_to_le16(reg);
9346 }
9347 req.reg_data = cpu_to_le16(val);
9348
9349 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9350}
9351
c0c050c5
MC
9352/* rtnl_lock held */
9353static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9354{
0ca12be9
VV
9355 struct mii_ioctl_data *mdio = if_mii(ifr);
9356 struct bnxt *bp = netdev_priv(dev);
9357 int rc;
9358
c0c050c5
MC
9359 switch (cmd) {
9360 case SIOCGMIIPHY:
0ca12be9
VV
9361 mdio->phy_id = bp->link_info.phy_addr;
9362
c0c050c5
MC
9363 /* fallthru */
9364 case SIOCGMIIREG: {
0ca12be9
VV
9365 u16 mii_regval = 0;
9366
c0c050c5
MC
9367 if (!netif_running(dev))
9368 return -EAGAIN;
9369
0ca12be9
VV
9370 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
9371 &mii_regval);
9372 mdio->val_out = mii_regval;
9373 return rc;
c0c050c5
MC
9374 }
9375
9376 case SIOCSMIIREG:
9377 if (!netif_running(dev))
9378 return -EAGAIN;
9379
0ca12be9
VV
9380 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
9381 mdio->val_in);
c0c050c5
MC
9382
9383 default:
9384 /* do nothing */
9385 break;
9386 }
9387 return -EOPNOTSUPP;
9388}
9389
b8875ca3
MC
9390static void bnxt_get_ring_stats(struct bnxt *bp,
9391 struct rtnl_link_stats64 *stats)
c0c050c5 9392{
b8875ca3 9393 int i;
c0c050c5 9394
c0c050c5 9395
c0c050c5
MC
9396 for (i = 0; i < bp->cp_nr_rings; i++) {
9397 struct bnxt_napi *bnapi = bp->bnapi[i];
9398 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9399 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
9400
9401 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
9402 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
9403 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
9404
9405 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
9406 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
9407 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
9408
9409 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
9410 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
9411 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
9412
9413 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
9414 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
9415 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
9416
9417 stats->rx_missed_errors +=
9418 le64_to_cpu(hw_stats->rx_discard_pkts);
9419
9420 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
9421
c0c050c5
MC
9422 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
9423 }
b8875ca3
MC
9424}
9425
9426static void bnxt_add_prev_stats(struct bnxt *bp,
9427 struct rtnl_link_stats64 *stats)
9428{
9429 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
9430
9431 stats->rx_packets += prev_stats->rx_packets;
9432 stats->tx_packets += prev_stats->tx_packets;
9433 stats->rx_bytes += prev_stats->rx_bytes;
9434 stats->tx_bytes += prev_stats->tx_bytes;
9435 stats->rx_missed_errors += prev_stats->rx_missed_errors;
9436 stats->multicast += prev_stats->multicast;
9437 stats->tx_dropped += prev_stats->tx_dropped;
9438}
9439
9440static void
9441bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
9442{
9443 struct bnxt *bp = netdev_priv(dev);
9444
9445 set_bit(BNXT_STATE_READ_STATS, &bp->state);
9446 /* Make sure bnxt_close_nic() sees that we are reading stats before
9447 * we check the BNXT_STATE_OPEN flag.
9448 */
9449 smp_mb__after_atomic();
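 /* This is intended to pair with the smp_mb__after_atomic() in
 * __bnxt_close_nic() after BNXT_STATE_OPEN is cleared: either the
 * closer sees BNXT_STATE_READ_STATS and waits, or we see the cleared
 * OPEN bit below and return the saved stats.
 */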
9450 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9451 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9452 *stats = bp->net_stats_prev;
9453 return;
9454 }
9455
9456 bnxt_get_ring_stats(bp, stats);
9457 bnxt_add_prev_stats(bp, stats);
c0c050c5 9458
9947f83f
MC
9459 if (bp->flags & BNXT_FLAG_PORT_STATS) {
9460 struct rx_port_stats *rx = bp->hw_rx_port_stats;
9461 struct tx_port_stats *tx = bp->hw_tx_port_stats;
9462
9463 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
9464 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
9465 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
9466 le64_to_cpu(rx->rx_ovrsz_frames) +
9467 le64_to_cpu(rx->rx_runt_frames);
9468 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
9469 le64_to_cpu(rx->rx_jbr_frames);
9470 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
9471 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
9472 stats->tx_errors = le64_to_cpu(tx->tx_err);
9473 }
f9b76ebd 9474 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
c0c050c5
MC
9475}
9476
9477static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
9478{
9479 struct net_device *dev = bp->dev;
9480 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9481 struct netdev_hw_addr *ha;
9482 u8 *haddr;
9483 int mc_count = 0;
9484 bool update = false;
9485 int off = 0;
9486
9487 netdev_for_each_mc_addr(ha, dev) {
9488 if (mc_count >= BNXT_MAX_MC_ADDRS) {
9489 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9490 vnic->mc_list_count = 0;
9491 return false;
9492 }
9493 haddr = ha->addr;
9494 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
9495 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
9496 update = true;
9497 }
9498 off += ETH_ALEN;
9499 mc_count++;
9500 }
9501 if (mc_count)
9502 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
9503
9504 if (mc_count != vnic->mc_list_count) {
9505 vnic->mc_list_count = mc_count;
9506 update = true;
9507 }
9508 return update;
9509}
9510
9511static bool bnxt_uc_list_updated(struct bnxt *bp)
9512{
9513 struct net_device *dev = bp->dev;
9514 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9515 struct netdev_hw_addr *ha;
9516 int off = 0;
9517
9518 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
9519 return true;
9520
9521 netdev_for_each_uc_addr(ha, dev) {
9522 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
9523 return true;
9524
9525 off += ETH_ALEN;
9526 }
9527 return false;
9528}
9529
9530static void bnxt_set_rx_mode(struct net_device *dev)
9531{
9532 struct bnxt *bp = netdev_priv(dev);
9533 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9534 u32 mask = vnic->rx_mask;
9535 bool mc_update = false;
9536 bool uc_update;
9537
9538 if (!netif_running(dev))
9539 return;
9540
9541 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
9542 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
30e33848
MC
9543 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
9544 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
c0c050c5 9545
17c71ac3 9546 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
c0c050c5
MC
9547 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9548
9549 uc_update = bnxt_uc_list_updated(bp);
9550
30e33848
MC
9551 if (dev->flags & IFF_BROADCAST)
9552 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5
MC
9553 if (dev->flags & IFF_ALLMULTI) {
9554 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9555 vnic->mc_list_count = 0;
9556 } else {
9557 mc_update = bnxt_mc_list_updated(bp, &mask);
9558 }
9559
9560 if (mask != vnic->rx_mask || uc_update || mc_update) {
9561 vnic->rx_mask = mask;
9562
9563 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
c213eae8 9564 bnxt_queue_sp_work(bp);
c0c050c5
MC
9565 }
9566}
9567
b664f008 9568static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
9569{
9570 struct net_device *dev = bp->dev;
9571 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9572 struct netdev_hw_addr *ha;
9573 int i, off = 0, rc;
9574 bool uc_update;
9575
9576 netif_addr_lock_bh(dev);
9577 uc_update = bnxt_uc_list_updated(bp);
9578 netif_addr_unlock_bh(dev);
9579
9580 if (!uc_update)
9581 goto skip_uc;
9582
9583 mutex_lock(&bp->hwrm_cmd_lock);
9584 for (i = 1; i < vnic->uc_filter_count; i++) {
9585 struct hwrm_cfa_l2_filter_free_input req = {0};
9586
9587 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
9588 -1);
9589
9590 req.l2_filter_id = vnic->fw_l2_filter_id[i];
9591
9592 rc = _hwrm_send_message(bp, &req, sizeof(req),
9593 HWRM_CMD_TIMEOUT);
9594 }
9595 mutex_unlock(&bp->hwrm_cmd_lock);
9596
9597 vnic->uc_filter_count = 1;
9598
9599 netif_addr_lock_bh(dev);
9600 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
9601 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9602 } else {
9603 netdev_for_each_uc_addr(ha, dev) {
9604 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
9605 off += ETH_ALEN;
9606 vnic->uc_filter_count++;
9607 }
9608 }
9609 netif_addr_unlock_bh(dev);
9610
9611 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
9612 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
9613 if (rc) {
9614 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
9615 rc);
9616 vnic->uc_filter_count = i;
b664f008 9617 return rc;
c0c050c5
MC
9618 }
9619 }
9620
9621skip_uc:
9622 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
b4e30e8e
MC
9623 if (rc && vnic->mc_list_count) {
9624 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
9625 rc);
9626 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9627 vnic->mc_list_count = 0;
9628 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9629 }
c0c050c5 9630 if (rc)
b4e30e8e 9631 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
c0c050c5 9632 rc);
b664f008
MC
9633
9634 return rc;
c0c050c5
MC
9635}
9636
2773dfb2
MC
9637static bool bnxt_can_reserve_rings(struct bnxt *bp)
9638{
9639#ifdef CONFIG_BNXT_SRIOV
f1ca94de 9640 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
2773dfb2
MC
9641 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9642
9643 /* No minimum rings were provisioned by the PF. Don't
9644 * reserve rings by default when device is down.
9645 */
9646 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
9647 return true;
9648
9649 if (!netif_running(bp->dev))
9650 return false;
9651 }
9652#endif
9653 return true;
9654}
9655
8079e8f1
MC
 9656/* If the chip and firmware support RFS */
9657static bool bnxt_rfs_supported(struct bnxt *bp)
9658{
e969ae5b
MC
9659 if (bp->flags & BNXT_FLAG_CHIP_P5) {
9660 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
9661 return true;
41e8d798 9662 return false;
e969ae5b 9663 }
8079e8f1
MC
9664 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
9665 return true;
ae10ae74
MC
9666 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9667 return true;
8079e8f1
MC
9668 return false;
9669}
9670
9671/* If runtime conditions support RFS */
2bcfa6f6
MC
9672static bool bnxt_rfs_capable(struct bnxt *bp)
9673{
9674#ifdef CONFIG_RFS_ACCEL
8079e8f1 9675 int vnics, max_vnics, max_rss_ctxs;
2bcfa6f6 9676
41e8d798 9677 if (bp->flags & BNXT_FLAG_CHIP_P5)
ac33906c 9678 return bnxt_rfs_supported(bp);
2773dfb2 9679 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
2bcfa6f6
MC
9680 return false;
9681
9682 vnics = 1 + bp->rx_nr_rings;
8079e8f1
MC
9683 max_vnics = bnxt_get_max_func_vnics(bp);
9684 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
ae10ae74
MC
9685
9686 /* RSS contexts not a limiting factor */
9687 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9688 max_rss_ctxs = max_vnics;
8079e8f1 9689 if (vnics > max_vnics || vnics > max_rss_ctxs) {
6a1eef5b
MC
9690 if (bp->rx_nr_rings > 1)
9691 netdev_warn(bp->dev,
9692 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9693 min(max_rss_ctxs - 1, max_vnics - 1));
2bcfa6f6 9694 return false;
a2304909 9695 }
2bcfa6f6 9696
f1ca94de 9697 if (!BNXT_NEW_RM(bp))
6a1eef5b
MC
9698 return true;
9699
9700 if (vnics == bp->hw_resc.resv_vnics)
9701 return true;
9702
780baad4 9703 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
6a1eef5b
MC
9704 if (vnics <= bp->hw_resc.resv_vnics)
9705 return true;
9706
9707 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
780baad4 9708 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
6a1eef5b 9709 return false;
2bcfa6f6
MC
9710#else
9711 return false;
9712#endif
9713}
9714
c0c050c5
MC
9715static netdev_features_t bnxt_fix_features(struct net_device *dev,
9716 netdev_features_t features)
9717{
2bcfa6f6
MC
9718 struct bnxt *bp = netdev_priv(dev);
9719
a2304909 9720 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
2bcfa6f6 9721 features &= ~NETIF_F_NTUPLE;
5a9f6b23 9722
1054aee8
MC
9723 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9724 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9725
9726 if (!(features & NETIF_F_GRO))
9727 features &= ~NETIF_F_GRO_HW;
9728
9729 if (features & NETIF_F_GRO_HW)
9730 features &= ~NETIF_F_LRO;
9731
5a9f6b23
MC
 9732 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
9733 * turned on or off together.
9734 */
9735 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
9736 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
9737 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
9738 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9739 NETIF_F_HW_VLAN_STAG_RX);
9740 else
9741 features |= NETIF_F_HW_VLAN_CTAG_RX |
9742 NETIF_F_HW_VLAN_STAG_RX;
9743 }
cf6645f8
MC
9744#ifdef CONFIG_BNXT_SRIOV
9745 if (BNXT_VF(bp)) {
9746 if (bp->vf.vlan) {
9747 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9748 NETIF_F_HW_VLAN_STAG_RX);
9749 }
9750 }
9751#endif
c0c050c5
MC
9752 return features;
9753}
9754
9755static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
9756{
9757 struct bnxt *bp = netdev_priv(dev);
9758 u32 flags = bp->flags;
9759 u32 changes;
9760 int rc = 0;
9761 bool re_init = false;
9762 bool update_tpa = false;
9763
9764 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
1054aee8 9765 if (features & NETIF_F_GRO_HW)
c0c050c5 9766 flags |= BNXT_FLAG_GRO;
1054aee8 9767 else if (features & NETIF_F_LRO)
c0c050c5
MC
9768 flags |= BNXT_FLAG_LRO;
9769
bdbd1eb5
MC
9770 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9771 flags &= ~BNXT_FLAG_TPA;
9772
c0c050c5
MC
9773 if (features & NETIF_F_HW_VLAN_CTAG_RX)
9774 flags |= BNXT_FLAG_STRIP_VLAN;
9775
9776 if (features & NETIF_F_NTUPLE)
9777 flags |= BNXT_FLAG_RFS;
9778
9779 changes = flags ^ bp->flags;
9780 if (changes & BNXT_FLAG_TPA) {
9781 update_tpa = true;
9782 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
f45b7b78
MC
9783 (flags & BNXT_FLAG_TPA) == 0 ||
9784 (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
9785 re_init = true;
9786 }
9787
9788 if (changes & ~BNXT_FLAG_TPA)
9789 re_init = true;
9790
9791 if (flags != bp->flags) {
9792 u32 old_flags = bp->flags;
9793
2bcfa6f6 9794 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
f45b7b78 9795 bp->flags = flags;
c0c050c5
MC
9796 if (update_tpa)
9797 bnxt_set_ring_params(bp);
9798 return rc;
9799 }
9800
9801 if (re_init) {
9802 bnxt_close_nic(bp, false, false);
f45b7b78 9803 bp->flags = flags;
c0c050c5
MC
9804 if (update_tpa)
9805 bnxt_set_ring_params(bp);
9806
9807 return bnxt_open_nic(bp, false, false);
9808 }
9809 if (update_tpa) {
f45b7b78 9810 bp->flags = flags;
c0c050c5
MC
9811 rc = bnxt_set_tpa(bp,
9812 (flags & BNXT_FLAG_TPA) ?
9813 true : false);
9814 if (rc)
9815 bp->flags = old_flags;
9816 }
9817 }
9818 return rc;
9819}
9820
ffd77621
MC
9821static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9822 u32 ring_id, u32 *prod, u32 *cons)
9823{
9824 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9825 struct hwrm_dbg_ring_info_get_input req = {0};
9826 int rc;
9827
9828 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9829 req.ring_type = ring_type;
9830 req.fw_ring_id = cpu_to_le32(ring_id);
9831 mutex_lock(&bp->hwrm_cmd_lock);
9832 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9833 if (!rc) {
9834 *prod = le32_to_cpu(resp->producer_index);
9835 *cons = le32_to_cpu(resp->consumer_index);
9836 }
9837 mutex_unlock(&bp->hwrm_cmd_lock);
9838 return rc;
9839}
9840
9f554590
MC
9841static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9842{
b6ab4b01 9843 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9f554590
MC
9844 int i = bnapi->index;
9845
3b2b7d9d
MC
9846 if (!txr)
9847 return;
9848
9f554590
MC
9849 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9850 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9851 txr->tx_cons);
9852}
9853
9854static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9855{
b6ab4b01 9856 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9f554590
MC
9857 int i = bnapi->index;
9858
3b2b7d9d
MC
9859 if (!rxr)
9860 return;
9861
9f554590
MC
9862 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9863 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9864 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9865 rxr->rx_sw_agg_prod);
9866}
9867
9868static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9869{
9870 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9871 int i = bnapi->index;
9872
9873 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9874 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9875}
9876
c0c050c5
MC
9877static void bnxt_dbg_dump_states(struct bnxt *bp)
9878{
9879 int i;
9880 struct bnxt_napi *bnapi;
c0c050c5
MC
9881
9882 for (i = 0; i < bp->cp_nr_rings; i++) {
9883 bnapi = bp->bnapi[i];
c0c050c5 9884 if (netif_msg_drv(bp)) {
9f554590
MC
9885 bnxt_dump_tx_sw_state(bnapi);
9886 bnxt_dump_rx_sw_state(bnapi);
9887 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
9888 }
9889 }
9890}
9891
6988bd92 9892static void bnxt_reset_task(struct bnxt *bp, bool silent)
c0c050c5 9893{
6988bd92
MC
9894 if (!silent)
9895 bnxt_dbg_dump_states(bp);
028de140 9896 if (netif_running(bp->dev)) {
b386cd36
MC
9897 int rc;
9898
9899 if (!silent)
9900 bnxt_ulp_stop(bp);
028de140 9901 bnxt_close_nic(bp, false, false);
b386cd36
MC
9902 rc = bnxt_open_nic(bp, false, false);
9903 if (!silent && !rc)
9904 bnxt_ulp_start(bp);
028de140 9905 }
c0c050c5
MC
9906}
9907
9908static void bnxt_tx_timeout(struct net_device *dev)
9909{
9910 struct bnxt *bp = netdev_priv(dev);
9911
9912 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9913 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 9914 bnxt_queue_sp_work(bp);
c0c050c5
MC
9915}
9916
e99e88a9 9917static void bnxt_timer(struct timer_list *t)
c0c050c5 9918{
e99e88a9 9919 struct bnxt *bp = from_timer(bp, t, timer);
c0c050c5
MC
9920 struct net_device *dev = bp->dev;
9921
9922 if (!netif_running(dev))
9923 return;
9924
9925 if (atomic_read(&bp->intr_sem) != 0)
9926 goto bnxt_restart_timer;
9927
adcc331e
MC
9928 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
9929 bp->stats_coal_ticks) {
3bdf56c4 9930 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
c213eae8 9931 bnxt_queue_sp_work(bp);
3bdf56c4 9932 }
5a84acbe
SP
9933
9934 if (bnxt_tc_flower_enabled(bp)) {
9935 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
9936 bnxt_queue_sp_work(bp);
9937 }
a1ef4a79
MC
9938
9939 if (bp->link_info.phy_retry) {
9940 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
9941 bp->link_info.phy_retry = 0;
9942 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
9943 } else {
9944 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
9945 bnxt_queue_sp_work(bp);
9946 }
9947 }
ffd77621
MC
9948
9949 if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
9950 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
9951 bnxt_queue_sp_work(bp);
9952 }
c0c050c5
MC
9953bnxt_restart_timer:
9954 mod_timer(&bp->timer, jiffies + bp->current_interval);
9955}
9956
a551ee94 9957static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6988bd92 9958{
a551ee94
MC
9959 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
9960 * set. If the device is being closed, bnxt_close() may be holding
6988bd92
MC
 9961 * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
 9962 * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
9963 */
9964 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9965 rtnl_lock();
a551ee94
MC
9966}
9967
9968static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
9969{
6988bd92
MC
9970 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9971 rtnl_unlock();
9972}
9973
a551ee94
MC
9974/* Only called from bnxt_sp_task() */
9975static void bnxt_reset(struct bnxt *bp, bool silent)
9976{
9977 bnxt_rtnl_lock_sp(bp);
9978 if (test_bit(BNXT_STATE_OPEN, &bp->state))
9979 bnxt_reset_task(bp, silent);
9980 bnxt_rtnl_unlock_sp(bp);
9981}
9982
ffd77621
MC
9983static void bnxt_chk_missed_irq(struct bnxt *bp)
9984{
9985 int i;
9986
9987 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9988 return;
9989
9990 for (i = 0; i < bp->cp_nr_rings; i++) {
9991 struct bnxt_napi *bnapi = bp->bnapi[i];
9992 struct bnxt_cp_ring_info *cpr;
9993 u32 fw_ring_id;
9994 int j;
9995
9996 if (!bnapi)
9997 continue;
9998
9999 cpr = &bnapi->cp_ring;
10000 for (j = 0; j < 2; j++) {
10001 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
10002 u32 val[2];
10003
10004 if (!cpr2 || cpr2->has_more_work ||
10005 !bnxt_has_work(bp, cpr2))
10006 continue;
10007
10008 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
10009 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
10010 continue;
10011 }
10012 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
10013 bnxt_dbg_hwrm_ring_info_get(bp,
10014 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
10015 fw_ring_id, &val[0], &val[1]);
83eb5c5c 10016 cpr->missed_irqs++;
ffd77621
MC
10017 }
10018 }
10019}
10020
c0c050c5
MC
10021static void bnxt_cfg_ntp_filters(struct bnxt *);
10022
10023static void bnxt_sp_task(struct work_struct *work)
10024{
10025 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
c0c050c5 10026
4cebdcec
MC
10027 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10028 smp_mb__after_atomic();
10029 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10030 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5 10031 return;
4cebdcec 10032 }
c0c050c5
MC
10033
10034 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
10035 bnxt_cfg_rx_mode(bp);
10036
10037 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
10038 bnxt_cfg_ntp_filters(bp);
c0c050c5
MC
10039 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
10040 bnxt_hwrm_exec_fwd_req(bp);
10041 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10042 bnxt_hwrm_tunnel_dst_port_alloc(
10043 bp, bp->vxlan_port,
10044 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10045 }
10046 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10047 bnxt_hwrm_tunnel_dst_port_free(
10048 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10049 }
7cdd5fc3
AD
10050 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10051 bnxt_hwrm_tunnel_dst_port_alloc(
10052 bp, bp->nge_port,
10053 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10054 }
10055 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10056 bnxt_hwrm_tunnel_dst_port_free(
10057 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10058 }
00db3cba 10059 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
3bdf56c4 10060 bnxt_hwrm_port_qstats(bp);
00db3cba 10061 bnxt_hwrm_port_qstats_ext(bp);
55e4398d 10062 bnxt_hwrm_pcie_qstats(bp);
00db3cba 10063 }
3bdf56c4 10064
0eaa24b9 10065 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
e2dc9b6e 10066 int rc;
0eaa24b9 10067
e2dc9b6e 10068 mutex_lock(&bp->link_lock);
0eaa24b9
MC
10069 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
10070 &bp->sp_event))
10071 bnxt_hwrm_phy_qcaps(bp);
10072
e2dc9b6e
MC
10073 rc = bnxt_update_link(bp, true);
10074 mutex_unlock(&bp->link_lock);
0eaa24b9
MC
10075 if (rc)
10076 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
10077 rc);
10078 }
a1ef4a79
MC
10079 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
10080 int rc;
10081
10082 mutex_lock(&bp->link_lock);
10083 rc = bnxt_update_phy_setting(bp);
10084 mutex_unlock(&bp->link_lock);
10085 if (rc) {
10086 netdev_warn(bp->dev, "update phy settings retry failed\n");
10087 } else {
10088 bp->link_info.phy_retry = false;
10089 netdev_info(bp->dev, "update phy settings retry succeeded\n");
10090 }
10091 }
90c694bb 10092 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
e2dc9b6e
MC
10093 mutex_lock(&bp->link_lock);
10094 bnxt_get_port_module_status(bp);
10095 mutex_unlock(&bp->link_lock);
90c694bb 10096 }
5a84acbe
SP
10097
10098 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
10099 bnxt_tc_flow_stats_work(bp);
10100
ffd77621
MC
10101 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
10102 bnxt_chk_missed_irq(bp);
10103
e2dc9b6e
MC
10104 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
10105 * must be the last functions to be called before exiting.
10106 */
6988bd92
MC
10107 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
10108 bnxt_reset(bp, false);
4cebdcec 10109
fc0f1929
MC
10110 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
10111 bnxt_reset(bp, true);
10112
4cebdcec
MC
10113 smp_mb__before_atomic();
10114 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5
MC
10115}
10116
d1e7925e 10117/* Under rtnl_lock */
98fdbe73
MC
10118int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
10119 int tx_xdp)
d1e7925e
MC
10120{
10121 int max_rx, max_tx, tx_sets = 1;
780baad4 10122 int tx_rings_needed, stats;
8f23d638 10123 int rx_rings = rx;
6fc2ffdf 10124 int cp, vnics, rc;
d1e7925e 10125
d1e7925e
MC
10126 if (tcs)
10127 tx_sets = tcs;
10128
10129 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
10130 if (rc)
10131 return rc;
10132
10133 if (max_rx < rx)
10134 return -ENOMEM;
10135
5f449249 10136 tx_rings_needed = tx * tx_sets + tx_xdp;
d1e7925e
MC
10137 if (max_tx < tx_rings_needed)
10138 return -ENOMEM;
10139
6fc2ffdf 10140 vnics = 1;
9b3d15e6 10141 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
6fc2ffdf
EW
10142 vnics += rx_rings;
10143
8f23d638
MC
10144 if (bp->flags & BNXT_FLAG_AGG_RINGS)
10145 rx_rings <<= 1;
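 /* With shared rings one completion ring services a TX and an RX ring,
 * so only the larger of the two counts is needed below; otherwise every
 * TX and RX ring gets its own completion ring.
 */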
10146 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
780baad4
VV
10147 stats = cp;
10148 if (BNXT_NEW_RM(bp)) {
11c3ec7b 10149 cp += bnxt_get_ulp_msix_num(bp);
780baad4
VV
10150 stats += bnxt_get_ulp_stat_ctxs(bp);
10151 }
6fc2ffdf 10152 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
780baad4 10153 stats, vnics);
d1e7925e
MC
10154}
10155
17086399
SP
10156static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
10157{
10158 if (bp->bar2) {
10159 pci_iounmap(pdev, bp->bar2);
10160 bp->bar2 = NULL;
10161 }
10162
10163 if (bp->bar1) {
10164 pci_iounmap(pdev, bp->bar1);
10165 bp->bar1 = NULL;
10166 }
10167
10168 if (bp->bar0) {
10169 pci_iounmap(pdev, bp->bar0);
10170 bp->bar0 = NULL;
10171 }
10172}
10173
10174static void bnxt_cleanup_pci(struct bnxt *bp)
10175{
10176 bnxt_unmap_bars(bp, bp->pdev);
10177 pci_release_regions(bp->pdev);
10178 pci_disable_device(bp->pdev);
10179}
10180
18775aa8
MC
10181static void bnxt_init_dflt_coal(struct bnxt *bp)
10182{
10183 struct bnxt_coal *coal;
10184
 10185 /* Tick values in microseconds.
10186 * 1 coal_buf x bufs_per_record = 1 completion record.
10187 */
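 /* For example, the rx defaults below read as roughly 30 coal_bufs /
 * 2 bufs_per_record = 15 completion records, or 10 usec, presumably
 * whichever limit is reached first.
 */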
10188 coal = &bp->rx_coal;
0c2ff8d7 10189 coal->coal_ticks = 10;
18775aa8
MC
10190 coal->coal_bufs = 30;
10191 coal->coal_ticks_irq = 1;
10192 coal->coal_bufs_irq = 2;
05abe4dd 10193 coal->idle_thresh = 50;
18775aa8
MC
10194 coal->bufs_per_record = 2;
10195 coal->budget = 64; /* NAPI budget */
10196
10197 coal = &bp->tx_coal;
10198 coal->coal_ticks = 28;
10199 coal->coal_bufs = 30;
10200 coal->coal_ticks_irq = 2;
10201 coal->coal_bufs_irq = 2;
10202 coal->bufs_per_record = 1;
10203
10204 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
10205}
10206
7c380918
MC
10207static int bnxt_fw_init_one_p1(struct bnxt *bp)
10208{
10209 int rc;
10210
10211 bp->fw_cap = 0;
10212 rc = bnxt_hwrm_ver_get(bp);
10213 if (rc)
10214 return rc;
10215
10216 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10217 rc = bnxt_alloc_kong_hwrm_resources(bp);
10218 if (rc)
10219 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10220 }
10221
10222 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10223 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10224 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10225 if (rc)
10226 return rc;
10227 }
10228 rc = bnxt_hwrm_func_reset(bp);
10229 if (rc)
10230 return -ENODEV;
10231
10232 bnxt_hwrm_fw_set_time(bp);
10233 return 0;
10234}
10235
10236static int bnxt_fw_init_one_p2(struct bnxt *bp)
10237{
10238 int rc;
10239
10240 /* Get the MAX capabilities for this function */
10241 rc = bnxt_hwrm_func_qcaps(bp);
10242 if (rc) {
10243 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10244 rc);
10245 return -ENODEV;
10246 }
10247
10248 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
10249 if (rc)
10250 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
10251 rc);
10252
07f83d72
MC
10253 rc = bnxt_hwrm_error_recovery_qcfg(bp);
10254 if (rc)
10255 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
10256 rc);
10257
7c380918
MC
10258 rc = bnxt_hwrm_func_drv_rgtr(bp);
10259 if (rc)
10260 return -ENODEV;
10261
10262 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
10263 if (rc)
10264 return -ENODEV;
10265
10266 bnxt_hwrm_func_qcfg(bp);
10267 bnxt_hwrm_vnic_qcaps(bp);
10268 bnxt_hwrm_port_led_qcaps(bp);
10269 bnxt_ethtool_init(bp);
10270 bnxt_dcb_init(bp);
10271 return 0;
10272}
10273
ba642ab7
MC
10274static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
10275{
10276 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
10277 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10278 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10279 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10280 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
10281 if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
10282 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10283 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10284 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10285 }
10286}
10287
10288static void bnxt_set_dflt_rfs(struct bnxt *bp)
10289{
10290 struct net_device *dev = bp->dev;
10291
10292 dev->hw_features &= ~NETIF_F_NTUPLE;
10293 dev->features &= ~NETIF_F_NTUPLE;
10294 bp->flags &= ~BNXT_FLAG_RFS;
10295 if (bnxt_rfs_supported(bp)) {
10296 dev->hw_features |= NETIF_F_NTUPLE;
10297 if (bnxt_rfs_capable(bp)) {
10298 bp->flags |= BNXT_FLAG_RFS;
10299 dev->features |= NETIF_F_NTUPLE;
10300 }
10301 }
10302}
10303
10304static void bnxt_fw_init_one_p3(struct bnxt *bp)
10305{
10306 struct pci_dev *pdev = bp->pdev;
10307
10308 bnxt_set_dflt_rss_hash_type(bp);
10309 bnxt_set_dflt_rfs(bp);
10310
10311 bnxt_get_wol_settings(bp);
10312 if (bp->flags & BNXT_FLAG_WOL_CAP)
10313 device_set_wakeup_enable(&pdev->dev, bp->wol);
10314 else
10315 device_set_wakeup_capable(&pdev->dev, false);
10316
10317 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10318 bnxt_hwrm_coal_params_qcaps(bp);
10319}
10320
ec5d31e3
MC
10321static int bnxt_fw_init_one(struct bnxt *bp)
10322{
10323 int rc;
10324
10325 rc = bnxt_fw_init_one_p1(bp);
10326 if (rc) {
10327 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
10328 return rc;
10329 }
10330 rc = bnxt_fw_init_one_p2(bp);
10331 if (rc) {
10332 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
10333 return rc;
10334 }
10335 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
10336 if (rc)
10337 return rc;
10338 bnxt_fw_init_one_p3(bp);
10339 return 0;
10340}
10341
c0c050c5
MC
10342static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
10343{
10344 int rc;
10345 struct bnxt *bp = netdev_priv(dev);
10346
10347 SET_NETDEV_DEV(dev, &pdev->dev);
10348
10349 /* enable device (incl. PCI PM wakeup), and bus-mastering */
10350 rc = pci_enable_device(pdev);
10351 if (rc) {
10352 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
10353 goto init_err;
10354 }
10355
10356 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10357 dev_err(&pdev->dev,
10358 "Cannot find PCI device base address, aborting\n");
10359 rc = -ENODEV;
10360 goto init_err_disable;
10361 }
10362
10363 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10364 if (rc) {
10365 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
10366 goto init_err_disable;
10367 }
10368
10369 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
10370 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
10371 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
10372 goto init_err_disable;
10373 }
10374
10375 pci_set_master(pdev);
10376
10377 bp->dev = dev;
10378 bp->pdev = pdev;
10379
10380 bp->bar0 = pci_ioremap_bar(pdev, 0);
10381 if (!bp->bar0) {
10382 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
10383 rc = -ENOMEM;
10384 goto init_err_release;
10385 }
10386
10387 bp->bar1 = pci_ioremap_bar(pdev, 2);
10388 if (!bp->bar1) {
10389 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
10390 rc = -ENOMEM;
10391 goto init_err_release;
10392 }
10393
10394 bp->bar2 = pci_ioremap_bar(pdev, 4);
10395 if (!bp->bar2) {
10396 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
10397 rc = -ENOMEM;
10398 goto init_err_release;
10399 }
10400
6316ea6d
SB
10401 pci_enable_pcie_error_reporting(pdev);
10402
c0c050c5
MC
10403 INIT_WORK(&bp->sp_task, bnxt_sp_task);
10404
10405 spin_lock_init(&bp->ntp_fltr_lock);
697197e5
MC
10406#if BITS_PER_LONG == 32
10407 spin_lock_init(&bp->db_lock);
10408#endif
c0c050c5
MC
10409
10410 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
10411 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
10412
18775aa8 10413 bnxt_init_dflt_coal(bp);
51f30785 10414
e99e88a9 10415 timer_setup(&bp->timer, bnxt_timer, 0);
c0c050c5
MC
10416 bp->current_interval = BNXT_TIMER_INTERVAL;
10417
caefe526 10418 clear_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
10419 return 0;
10420
10421init_err_release:
17086399 10422 bnxt_unmap_bars(bp, pdev);
c0c050c5
MC
10423 pci_release_regions(pdev);
10424
10425init_err_disable:
10426 pci_disable_device(pdev);
10427
10428init_err:
10429 return rc;
10430}
10431
10432/* rtnl_lock held */
10433static int bnxt_change_mac_addr(struct net_device *dev, void *p)
10434{
10435 struct sockaddr *addr = p;
1fc2cfd0
JH
10436 struct bnxt *bp = netdev_priv(dev);
10437 int rc = 0;
c0c050c5
MC
10438
10439 if (!is_valid_ether_addr(addr->sa_data))
10440 return -EADDRNOTAVAIL;
10441
c1a7bdff
MC
10442 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
10443 return 0;
10444
28ea334b 10445 rc = bnxt_approve_mac(bp, addr->sa_data, true);
84c33dd3
MC
10446 if (rc)
10447 return rc;
bdd4347b 10448
c0c050c5 10449 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1fc2cfd0
JH
10450 if (netif_running(dev)) {
10451 bnxt_close_nic(bp, false, false);
10452 rc = bnxt_open_nic(bp, false, false);
10453 }
c0c050c5 10454
1fc2cfd0 10455 return rc;
c0c050c5
MC
10456}
10457
10458/* rtnl_lock held */
10459static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
10460{
10461 struct bnxt *bp = netdev_priv(dev);
10462
c0c050c5
MC
10463 if (netif_running(dev))
10464 bnxt_close_nic(bp, false, false);
10465
10466 dev->mtu = new_mtu;
10467 bnxt_set_ring_params(bp);
10468
10469 if (netif_running(dev))
10470 return bnxt_open_nic(bp, false, false);
10471
10472 return 0;
10473}
10474
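/* Reconfigure the number of TX traffic classes.  Validates the requested
 * count against bp->max_tc and the available rings, then closes the NIC
 * (if running), scales the TX ring count by the TC count, and reopens it.
 */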
c5e3deb8 10475int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
c0c050c5
MC
10476{
10477 struct bnxt *bp = netdev_priv(dev);
3ffb6a39 10478 bool sh = false;
d1e7925e 10479 int rc;
16e5cc64 10480
c0c050c5 10481 if (tc > bp->max_tc) {
b451c8b6 10482 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
c0c050c5
MC
10483 tc, bp->max_tc);
10484 return -EINVAL;
10485 }
10486
10487 if (netdev_get_num_tc(dev) == tc)
10488 return 0;
10489
3ffb6a39
MC
10490 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10491 sh = true;
10492
98fdbe73
MC
10493 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
10494 sh, tc, bp->tx_nr_rings_xdp);
d1e7925e
MC
10495 if (rc)
10496 return rc;
c0c050c5
MC
10497
10498 /* Needs to close the device and do hw resource re-allocations */
10499 if (netif_running(bp->dev))
10500 bnxt_close_nic(bp, true, false);
10501
10502 if (tc) {
10503 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
10504 netdev_set_num_tc(dev, tc);
10505 } else {
10506 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10507 netdev_reset_tc(dev);
10508 }
87e9b377 10509 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
3ffb6a39
MC
10510 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
10511 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5
MC
10512
10513 if (netif_running(bp->dev))
10514 return bnxt_open_nic(bp, true, false);
10515
10516 return 0;
10517}
10518
9e0fd15d
JP
10519static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
10520 void *cb_priv)
c5e3deb8 10521{
9e0fd15d 10522 struct bnxt *bp = cb_priv;
de4784ca 10523
312324f1
JK
10524 if (!bnxt_tc_flower_enabled(bp) ||
10525 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
38cf0426 10526 return -EOPNOTSUPP;
c5e3deb8 10527
9e0fd15d
JP
10528 switch (type) {
10529 case TC_SETUP_CLSFLOWER:
10530 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
10531 default:
10532 return -EOPNOTSUPP;
10533 }
10534}
10535
955bcb6e
PNA
10536static LIST_HEAD(bnxt_block_cb_list);
10537
2ae7408f
SP
10538static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
10539 void *type_data)
10540{
4e95bc26
PNA
10541 struct bnxt *bp = netdev_priv(dev);
10542
2ae7408f 10543 switch (type) {
9e0fd15d 10544 case TC_SETUP_BLOCK:
955bcb6e
PNA
10545 return flow_block_cb_setup_simple(type_data,
10546 &bnxt_block_cb_list,
4e95bc26
PNA
10547 bnxt_setup_tc_block_cb,
10548 bp, bp, true);
575ed7d3 10549 case TC_SETUP_QDISC_MQPRIO: {
2ae7408f
SP
10550 struct tc_mqprio_qopt *mqprio = type_data;
10551
10552 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
56f36acd 10553
2ae7408f
SP
10554 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
10555 }
10556 default:
10557 return -EOPNOTSUPP;
10558 }
c5e3deb8
MC
10559}
10560
c0c050c5
MC
10561#ifdef CONFIG_RFS_ACCEL
10562static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
10563 struct bnxt_ntuple_filter *f2)
10564{
10565 struct flow_keys *keys1 = &f1->fkeys;
10566 struct flow_keys *keys2 = &f2->fkeys;
10567
10568 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
10569 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
10570 keys1->ports.ports == keys2->ports.ports &&
10571 keys1->basic.ip_proto == keys2->basic.ip_proto &&
10572 keys1->basic.n_proto == keys2->basic.n_proto &&
61aad724 10573 keys1->control.flags == keys2->control.flags &&
a54c4d74
MC
10574 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
10575 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
c0c050c5
MC
10576 return true;
10577
10578 return false;
10579}
10580
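/* .ndo_rx_flow_steer handler for accelerated RFS.  Dissects the skb into
 * flow keys, rejects flow types the firmware cannot steer, and dedupes
 * against the ntuple filter hash table.  A new filter gets a sw_id from
 * ntp_fltr_bmap and is programmed later from bnxt_sp_task() via
 * BNXT_RX_NTP_FLTR_SP_EVENT; the sw_id is returned as the RPS flow id.
 */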
10581static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
10582 u16 rxq_index, u32 flow_id)
10583{
10584 struct bnxt *bp = netdev_priv(dev);
10585 struct bnxt_ntuple_filter *fltr, *new_fltr;
10586 struct flow_keys *fkeys;
10587 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
a54c4d74 10588 int rc = 0, idx, bit_id, l2_idx = 0;
c0c050c5
MC
10589 struct hlist_head *head;
10590
a54c4d74
MC
10591 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
10592 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10593 int off = 0, j;
10594
10595 netif_addr_lock_bh(dev);
10596 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
10597 if (ether_addr_equal(eth->h_dest,
10598 vnic->uc_list + off)) {
10599 l2_idx = j + 1;
10600 break;
10601 }
10602 }
10603 netif_addr_unlock_bh(dev);
10604 if (!l2_idx)
10605 return -EINVAL;
10606 }
c0c050c5
MC
10607 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
10608 if (!new_fltr)
10609 return -ENOMEM;
10610
10611 fkeys = &new_fltr->fkeys;
10612 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
10613 rc = -EPROTONOSUPPORT;
10614 goto err_free;
10615 }
10616
dda0e746
MC
10617 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
10618 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
c0c050c5
MC
10619 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
10620 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
10621 rc = -EPROTONOSUPPORT;
10622 goto err_free;
10623 }
dda0e746
MC
10624 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
10625 bp->hwrm_spec_code < 0x10601) {
10626 rc = -EPROTONOSUPPORT;
10627 goto err_free;
10628 }
61aad724
MC
10629 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
10630 bp->hwrm_spec_code < 0x10601) {
10631 rc = -EPROTONOSUPPORT;
10632 goto err_free;
10633 }
c0c050c5 10634
a54c4d74 10635 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
c0c050c5
MC
10636 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
10637
10638 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
10639 head = &bp->ntp_fltr_hash_tbl[idx];
10640 rcu_read_lock();
10641 hlist_for_each_entry_rcu(fltr, head, hash) {
10642 if (bnxt_fltr_match(fltr, new_fltr)) {
10643 rcu_read_unlock();
10644 rc = 0;
10645 goto err_free;
10646 }
10647 }
10648 rcu_read_unlock();
10649
10650 spin_lock_bh(&bp->ntp_fltr_lock);
84e86b98
MC
10651 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
10652 BNXT_NTP_FLTR_MAX_FLTR, 0);
10653 if (bit_id < 0) {
c0c050c5
MC
10654 spin_unlock_bh(&bp->ntp_fltr_lock);
10655 rc = -ENOMEM;
10656 goto err_free;
10657 }
10658
84e86b98 10659 new_fltr->sw_id = (u16)bit_id;
c0c050c5 10660 new_fltr->flow_id = flow_id;
a54c4d74 10661 new_fltr->l2_fltr_idx = l2_idx;
c0c050c5
MC
10662 new_fltr->rxq = rxq_index;
10663 hlist_add_head_rcu(&new_fltr->hash, head);
10664 bp->ntp_fltr_count++;
10665 spin_unlock_bh(&bp->ntp_fltr_lock);
10666
10667 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
c213eae8 10668 bnxt_queue_sp_work(bp);
c0c050c5
MC
10669
10670 return new_fltr->sw_id;
10671
10672err_free:
10673 kfree(new_fltr);
10674 return rc;
10675}
10676
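/* Called from bnxt_sp_task() to reconcile the ntuple filter table with
 * firmware: filters not yet valid are programmed via HWRM, and filters
 * that RPS reports as expired (or that failed to program) are freed.
 */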
10677static void bnxt_cfg_ntp_filters(struct bnxt *bp)
10678{
10679 int i;
10680
10681 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
10682 struct hlist_head *head;
10683 struct hlist_node *tmp;
10684 struct bnxt_ntuple_filter *fltr;
10685 int rc;
10686
10687 head = &bp->ntp_fltr_hash_tbl[i];
10688 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
10689 bool del = false;
10690
10691 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
10692 if (rps_may_expire_flow(bp->dev, fltr->rxq,
10693 fltr->flow_id,
10694 fltr->sw_id)) {
10695 bnxt_hwrm_cfa_ntuple_filter_free(bp,
10696 fltr);
10697 del = true;
10698 }
10699 } else {
10700 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
10701 fltr);
10702 if (rc)
10703 del = true;
10704 else
10705 set_bit(BNXT_FLTR_VALID, &fltr->state);
10706 }
10707
10708 if (del) {
10709 spin_lock_bh(&bp->ntp_fltr_lock);
10710 hlist_del_rcu(&fltr->hash);
10711 bp->ntp_fltr_count--;
10712 spin_unlock_bh(&bp->ntp_fltr_lock);
10713 synchronize_rcu();
10714 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
10715 kfree(fltr);
10716 }
10717 }
10718 }
19241368
JH
10719 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
10720 netdev_info(bp->dev, "Received PF driver unload event!\n");
c0c050c5
MC
10721}
10722
10723#else
10724
10725static void bnxt_cfg_ntp_filters(struct bnxt *bp)
10726{
10727}
10728
10729#endif /* CONFIG_RFS_ACCEL */
10730
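/* UDP tunnel port offload.  The hardware tracks one VXLAN and one GENEVE
 * destination port; the handlers below refcount the ports and defer the
 * actual tunnel dst-port programming to bnxt_sp_task() via sp_event bits.
 */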
ad51b8e9
AD
10731static void bnxt_udp_tunnel_add(struct net_device *dev,
10732 struct udp_tunnel_info *ti)
c0c050c5
MC
10733{
10734 struct bnxt *bp = netdev_priv(dev);
10735
ad51b8e9 10736 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
c0c050c5
MC
10737 return;
10738
ad51b8e9 10739 if (!netif_running(dev))
c0c050c5
MC
10740 return;
10741
ad51b8e9
AD
10742 switch (ti->type) {
10743 case UDP_TUNNEL_TYPE_VXLAN:
10744 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
10745 return;
c0c050c5 10746
ad51b8e9
AD
10747 bp->vxlan_port_cnt++;
10748 if (bp->vxlan_port_cnt == 1) {
10749 bp->vxlan_port = ti->port;
10750 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
c213eae8 10751 bnxt_queue_sp_work(bp);
ad51b8e9
AD
10752 }
10753 break;
7cdd5fc3
AD
10754 case UDP_TUNNEL_TYPE_GENEVE:
10755 if (bp->nge_port_cnt && bp->nge_port != ti->port)
10756 return;
10757
10758 bp->nge_port_cnt++;
10759 if (bp->nge_port_cnt == 1) {
10760 bp->nge_port = ti->port;
10761 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
10762 }
10763 break;
ad51b8e9
AD
10764 default:
10765 return;
c0c050c5 10766 }
ad51b8e9 10767
c213eae8 10768 bnxt_queue_sp_work(bp);
c0c050c5
MC
10769}
10770
ad51b8e9
AD
10771static void bnxt_udp_tunnel_del(struct net_device *dev,
10772 struct udp_tunnel_info *ti)
c0c050c5
MC
10773{
10774 struct bnxt *bp = netdev_priv(dev);
10775
ad51b8e9 10776 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
c0c050c5
MC
10777 return;
10778
ad51b8e9 10779 if (!netif_running(dev))
c0c050c5
MC
10780 return;
10781
ad51b8e9
AD
10782 switch (ti->type) {
10783 case UDP_TUNNEL_TYPE_VXLAN:
10784 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
10785 return;
c0c050c5
MC
10786 bp->vxlan_port_cnt--;
10787
ad51b8e9
AD
10788 if (bp->vxlan_port_cnt != 0)
10789 return;
10790
10791 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
10792 break;
7cdd5fc3
AD
10793 case UDP_TUNNEL_TYPE_GENEVE:
10794 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
10795 return;
10796 bp->nge_port_cnt--;
10797
10798 if (bp->nge_port_cnt != 0)
10799 return;
10800
10801 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
10802 break;
ad51b8e9
AD
10803 default:
10804 return;
c0c050c5 10805 }
ad51b8e9 10806
c213eae8 10807 bnxt_queue_sp_work(bp);
c0c050c5
MC
10808}
10809
39d8ba2e
MC
10810static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
10811 struct net_device *dev, u32 filter_mask,
10812 int nlflags)
10813{
10814 struct bnxt *bp = netdev_priv(dev);
10815
10816 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
10817 nlflags, filter_mask, NULL);
10818}
10819
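/* Set the embedded bridge mode from an IFLA_BRIDGE_MODE attribute nested
 * in IFLA_AF_SPEC.  Only supported on a single-PF device with firmware
 * HWRM spec 0x10708 or newer.
 */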
10820static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
2fd527b7 10821 u16 flags, struct netlink_ext_ack *extack)
39d8ba2e
MC
10822{
10823 struct bnxt *bp = netdev_priv(dev);
10824 struct nlattr *attr, *br_spec;
10825 int rem, rc = 0;
10826
10827 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
10828 return -EOPNOTSUPP;
10829
10830 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
10831 if (!br_spec)
10832 return -EINVAL;
10833
10834 nla_for_each_nested(attr, br_spec, rem) {
10835 u16 mode;
10836
10837 if (nla_type(attr) != IFLA_BRIDGE_MODE)
10838 continue;
10839
10840 if (nla_len(attr) < sizeof(mode))
10841 return -EINVAL;
10842
10843 mode = nla_get_u16(attr);
10844 if (mode == bp->br_mode)
10845 break;
10846
10847 rc = bnxt_hwrm_set_br_mode(bp, mode);
10848 if (!rc)
10849 bp->br_mode = mode;
10850 break;
10851 }
10852 return rc;
10853}
10854
52d5254a
FF
10855int bnxt_get_port_parent_id(struct net_device *dev,
10856 struct netdev_phys_item_id *ppid)
c124a62f 10857{
52d5254a
FF
10858 struct bnxt *bp = netdev_priv(dev);
10859
c124a62f
SP
10860 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
10861 return -EOPNOTSUPP;
10862
10863 /* The PF and its VF-reps only support the switchdev framework */
10864 if (!BNXT_PF(bp))
10865 return -EOPNOTSUPP;
10866
52d5254a
FF
10867 ppid->id_len = sizeof(bp->switch_id);
10868 memcpy(ppid->id, bp->switch_id, ppid->id_len);
c124a62f 10869
52d5254a 10870 return 0;
c124a62f
SP
10871}
10872
c9c49a65
JP
10873static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
10874{
10875 struct bnxt *bp = netdev_priv(dev);
10876
10877 return &bp->dl_port;
10878}
10879
c0c050c5
MC
10880static const struct net_device_ops bnxt_netdev_ops = {
10881 .ndo_open = bnxt_open,
10882 .ndo_start_xmit = bnxt_start_xmit,
10883 .ndo_stop = bnxt_close,
10884 .ndo_get_stats64 = bnxt_get_stats64,
10885 .ndo_set_rx_mode = bnxt_set_rx_mode,
10886 .ndo_do_ioctl = bnxt_ioctl,
10887 .ndo_validate_addr = eth_validate_addr,
10888 .ndo_set_mac_address = bnxt_change_mac_addr,
10889 .ndo_change_mtu = bnxt_change_mtu,
10890 .ndo_fix_features = bnxt_fix_features,
10891 .ndo_set_features = bnxt_set_features,
10892 .ndo_tx_timeout = bnxt_tx_timeout,
10893#ifdef CONFIG_BNXT_SRIOV
10894 .ndo_get_vf_config = bnxt_get_vf_config,
10895 .ndo_set_vf_mac = bnxt_set_vf_mac,
10896 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
10897 .ndo_set_vf_rate = bnxt_set_vf_bw,
10898 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
10899 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
746df139 10900 .ndo_set_vf_trust = bnxt_set_vf_trust,
c0c050c5
MC
10901#endif
10902 .ndo_setup_tc = bnxt_setup_tc,
10903#ifdef CONFIG_RFS_ACCEL
10904 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
10905#endif
ad51b8e9
AD
10906 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
10907 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
f4e63525 10908 .ndo_bpf = bnxt_xdp,
f18c2b77 10909 .ndo_xdp_xmit = bnxt_xdp_xmit,
39d8ba2e
MC
10910 .ndo_bridge_getlink = bnxt_bridge_getlink,
10911 .ndo_bridge_setlink = bnxt_bridge_setlink,
c9c49a65 10912 .ndo_get_devlink_port = bnxt_get_devlink_port,
c0c050c5
MC
10913};
10914
10915static void bnxt_remove_one(struct pci_dev *pdev)
10916{
10917 struct net_device *dev = pci_get_drvdata(pdev);
10918 struct bnxt *bp = netdev_priv(dev);
10919
4ab0c6a8 10920 if (BNXT_PF(bp)) {
c0c050c5 10921 bnxt_sriov_disable(bp);
4ab0c6a8
SP
10922 bnxt_dl_unregister(bp);
10923 }
c0c050c5 10924
6316ea6d 10925 pci_disable_pcie_error_reporting(pdev);
c0c050c5 10926 unregister_netdev(dev);
2ae7408f 10927 bnxt_shutdown_tc(bp);
c213eae8 10928 bnxt_cancel_sp_work(bp);
c0c050c5
MC
10929 bp->sp_event = 0;
10930
7809592d 10931 bnxt_clear_int_mode(bp);
be58a0da 10932 bnxt_hwrm_func_drv_unrgtr(bp);
c0c050c5 10933 bnxt_free_hwrm_resources(bp);
e605db80 10934 bnxt_free_hwrm_short_cmd_req(bp);
eb513658 10935 bnxt_ethtool_free(bp);
7df4ae9f 10936 bnxt_dcb_free(bp);
a588e458
MC
10937 kfree(bp->edev);
10938 bp->edev = NULL;
c20dc142 10939 bnxt_cleanup_pci(bp);
98f04cf0
MC
10940 bnxt_free_ctx_mem(bp);
10941 kfree(bp->ctx);
10942 bp->ctx = NULL;
fd3ab1c7 10943 bnxt_free_port_stats(bp);
c0c050c5 10944 free_netdev(dev);
c0c050c5
MC
10945}
10946
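/* Query PHY capabilities and current link state from firmware.  When
 * fw_dflt is set (initial probe), also seed the driver's link settings
 * (autoneg, advertised speeds, flow control) from the firmware defaults.
 */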
ba642ab7 10947static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
c0c050c5
MC
10948{
10949 int rc = 0;
10950 struct bnxt_link_info *link_info = &bp->link_info;
c0c050c5 10951
170ce013
MC
10952 rc = bnxt_hwrm_phy_qcaps(bp);
10953 if (rc) {
10954 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
10955 rc);
10956 return rc;
10957 }
c0c050c5
MC
10958 rc = bnxt_update_link(bp, false);
10959 if (rc) {
10960 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
10961 rc);
10962 return rc;
10963 }
10964
93ed8117
MC
10965 /* Older firmware does not have supported_auto_speeds, so assume
10966 * that all supported speeds can be autonegotiated.
10967 */
10968 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
10969 link_info->support_auto_speeds = link_info->support_speeds;
10970
ba642ab7
MC
10971 if (!fw_dflt)
10972 return 0;
10973
c0c050c5 10974 /* Initialize the ethtool setting copy with NVM settings */
0d8abf02 10975 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
c9ee9516
MC
10976 link_info->autoneg = BNXT_AUTONEG_SPEED;
10977 if (bp->hwrm_spec_code >= 0x10201) {
10978 if (link_info->auto_pause_setting &
10979 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10980 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10981 } else {
10982 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10983 }
0d8abf02 10984 link_info->advertising = link_info->auto_link_speeds;
0d8abf02
MC
10985 } else {
10986 link_info->req_link_speed = link_info->force_link_speed;
10987 link_info->req_duplex = link_info->duplex_setting;
c0c050c5 10988 }
c9ee9516
MC
10989 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10990 link_info->req_flow_ctrl =
10991 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10992 else
10993 link_info->req_flow_ctrl = link_info->force_pause_setting;
ba642ab7 10994 return 0;
c0c050c5
MC
10995}
10996
10997static int bnxt_get_max_irq(struct pci_dev *pdev)
10998{
10999 u16 ctrl;
11000
11001 if (!pdev->msix_cap)
11002 return 1;
11003
11004 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
11005 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
11006}
11007
6e6c5a57
MC
11008static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11009 int *max_cp)
c0c050c5 11010{
6a4f2947 11011 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
e30fbc33 11012 int max_ring_grps = 0, max_irq;
c0c050c5 11013
6a4f2947
MC
11014 *max_tx = hw_resc->max_tx_rings;
11015 *max_rx = hw_resc->max_rx_rings;
e30fbc33
MC
11016 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
11017 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
11018 bnxt_get_ulp_msix_num(bp),
c027c6b4 11019 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
e30fbc33
MC
11020 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11021 *max_cp = min_t(int, *max_cp, max_irq);
6a4f2947 11022 max_ring_grps = hw_resc->max_hw_ring_grps;
76595193
PS
11023 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
11024 *max_cp -= 1;
11025 *max_rx -= 2;
11026 }
c0c050c5
MC
11027 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11028 *max_rx >>= 1;
e30fbc33
MC
11029 if (bp->flags & BNXT_FLAG_CHIP_P5) {
11030 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
11031 /* On P5 chips, max_cp output param should be available NQs */
11032 *max_cp = max_irq;
11033 }
b72d4a68 11034 *max_rx = min_t(int, *max_rx, max_ring_grps);
6e6c5a57
MC
11035}
11036
11037int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
11038{
11039 int rx, tx, cp;
11040
11041 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
78f058a4
MC
11042 *max_rx = rx;
11043 *max_tx = tx;
6e6c5a57
MC
11044 if (!rx || !tx || !cp)
11045 return -ENOMEM;
11046
6e6c5a57
MC
11047 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
11048}
11049
e4060d30
MC
11050static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11051 bool shared)
11052{
11053 int rc;
11054
11055 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
bdbd1eb5
MC
11056 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
11057 /* Not enough rings, try disabling agg rings. */
11058 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
11059 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
07f4fde5
MC
11060 if (rc) {
11061 /* set BNXT_FLAG_AGG_RINGS back for consistency */
11062 bp->flags |= BNXT_FLAG_AGG_RINGS;
bdbd1eb5 11063 return rc;
07f4fde5 11064 }
bdbd1eb5 11065 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
1054aee8
MC
11066 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11067 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bdbd1eb5
MC
11068 bnxt_set_ring_params(bp);
11069 }
e4060d30
MC
11070
11071 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
11072 int max_cp, max_stat, max_irq;
11073
11074 /* Reserve minimum resources for RoCE */
11075 max_cp = bnxt_get_max_func_cp_rings(bp);
11076 max_stat = bnxt_get_max_func_stat_ctxs(bp);
11077 max_irq = bnxt_get_max_func_irqs(bp);
11078 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
11079 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
11080 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
11081 return 0;
11082
11083 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
11084 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
11085 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
11086 max_cp = min_t(int, max_cp, max_irq);
11087 max_cp = min_t(int, max_cp, max_stat);
11088 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
11089 if (rc)
11090 rc = 0;
11091 }
11092 return rc;
11093}
11094
58ea801a
MC
11095/* In initial default shared ring setting, each shared ring must have a
11096 * RX/TX ring pair.
11097 */
11098static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
11099{
11100 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
11101 bp->rx_nr_rings = bp->cp_nr_rings;
11102 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
11103 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11104}
11105
702c221c 11106static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
6e6c5a57
MC
11107{
11108 int dflt_rings, max_rx_rings, max_tx_rings, rc;
6e6c5a57 11109
2773dfb2
MC
11110 if (!bnxt_can_reserve_rings(bp))
11111 return 0;
11112
6e6c5a57
MC
11113 if (sh)
11114 bp->flags |= BNXT_FLAG_SHARED_RINGS;
d629522e 11115 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
1d3ef13d
MC
11116 /* Reduce default rings on multi-port cards so that total default
11117 * rings do not exceed CPU count.
11118 */
11119 if (bp->port_count > 1) {
11120 int max_rings =
11121 max_t(int, num_online_cpus() / bp->port_count, 1);
11122
11123 dflt_rings = min_t(int, dflt_rings, max_rings);
11124 }
e4060d30 11125 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6e6c5a57
MC
11126 if (rc)
11127 return rc;
11128 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
11129 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
58ea801a
MC
11130 if (sh)
11131 bnxt_trim_dflt_sh_rings(bp);
11132 else
11133 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
11134 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
391be5c2 11135
674f50a5 11136 rc = __bnxt_reserve_rings(bp);
391be5c2
MC
11137 if (rc)
11138 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
58ea801a
MC
11139 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11140 if (sh)
11141 bnxt_trim_dflt_sh_rings(bp);
391be5c2 11142
674f50a5
MC
11143 /* Rings may have been trimmed, re-reserve the trimmed rings. */
11144 if (bnxt_need_reserve_rings(bp)) {
11145 rc = __bnxt_reserve_rings(bp);
11146 if (rc)
11147 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
11148 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11149 }
76595193
PS
11150 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11151 bp->rx_nr_rings++;
11152 bp->cp_nr_rings++;
11153 }
6e6c5a57 11154 return rc;
c0c050c5
MC
11155}
11156
47558acd
MC
11157static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
11158{
11159 int rc;
11160
11161 if (bp->tx_nr_rings)
11162 return 0;
11163
6b95c3e9
MC
11164 bnxt_ulp_irq_stop(bp);
11165 bnxt_clear_int_mode(bp);
47558acd
MC
11166 rc = bnxt_set_dflt_rings(bp, true);
11167 if (rc) {
11168 netdev_err(bp->dev, "Not enough rings available.\n");
6b95c3e9 11169 goto init_dflt_ring_err;
47558acd
MC
11170 }
11171 rc = bnxt_init_int_mode(bp);
11172 if (rc)
6b95c3e9
MC
11173 goto init_dflt_ring_err;
11174
47558acd
MC
11175 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11176 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
11177 bp->flags |= BNXT_FLAG_RFS;
11178 bp->dev->features |= NETIF_F_NTUPLE;
11179 }
6b95c3e9
MC
11180init_dflt_ring_err:
11181 bnxt_ulp_irq_restart(bp, rc);
11182 return rc;
47558acd
MC
11183}
11184
80fcaf46 11185int bnxt_restore_pf_fw_resources(struct bnxt *bp)
7b08f661 11186{
80fcaf46
MC
11187 int rc;
11188
7b08f661
MC
11189 ASSERT_RTNL();
11190 bnxt_hwrm_func_qcaps(bp);
1a037782
VD
11191
11192 if (netif_running(bp->dev))
11193 __bnxt_close_nic(bp, true, false);
11194
ec86f14e 11195 bnxt_ulp_irq_stop(bp);
80fcaf46
MC
11196 bnxt_clear_int_mode(bp);
11197 rc = bnxt_init_int_mode(bp);
ec86f14e 11198 bnxt_ulp_irq_restart(bp, rc);
1a037782
VD
11199
11200 if (netif_running(bp->dev)) {
11201 if (rc)
11202 dev_close(bp->dev);
11203 else
11204 rc = bnxt_open_nic(bp, true, false);
11205 }
11206
80fcaf46 11207 return rc;
7b08f661
MC
11208}
11209
a22a6ac2
MC
11210static int bnxt_init_mac_addr(struct bnxt *bp)
11211{
11212 int rc = 0;
11213
11214 if (BNXT_PF(bp)) {
11215 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
11216 } else {
11217#ifdef CONFIG_BNXT_SRIOV
11218 struct bnxt_vf_info *vf = &bp->vf;
28ea334b 11219 bool strict_approval = true;
a22a6ac2
MC
11220
11221 if (is_valid_ether_addr(vf->mac_addr)) {
91cdda40 11222 /* overwrite netdev dev_addr with admin VF MAC */
a22a6ac2 11223 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
28ea334b
MC
11224 /* Older PF driver or firmware may not approve this
11225 * correctly.
11226 */
11227 strict_approval = false;
a22a6ac2
MC
11228 } else {
11229 eth_hw_addr_random(bp->dev);
a22a6ac2 11230 }
28ea334b 11231 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
a22a6ac2
MC
11232#endif
11233 }
11234 return rc;
11235}
11236
03213a99
JP
11237static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
11238{
11239 struct pci_dev *pdev = bp->pdev;
11240 int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
11241 u32 dw;
11242
11243 if (!pos) {
11244 netdev_info(bp->dev, "Unable do read adapter's DSN");
11245 return -EOPNOTSUPP;
11246 }
11247
11248 /* DSN (two dw) is at an offset of 4 from the cap pos */
11249 pos += 4;
11250 pci_read_config_dword(pdev, pos, &dw);
11251 put_unaligned_le32(dw, &dsn[0]);
11252 pci_read_config_dword(pdev, pos + 4, &dw);
11253 put_unaligned_le32(dw, &dsn[4]);
11254 return 0;
11255}
11256
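/* PCI probe entry point: allocate the netdev sized for the maximum MSI-X
 * count, map the BARs via bnxt_init_board(), run the firmware init phases,
 * set up netdev features, MAC address, default rings and interrupt mode,
 * then register the netdev (plus devlink and TC offload for the PF).
 */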
c0c050c5
MC
11257static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
11258{
11259 static int version_printed;
11260 struct net_device *dev;
11261 struct bnxt *bp;
6e6c5a57 11262 int rc, max_irqs;
c0c050c5 11263
4e00338a 11264 if (pci_is_bridge(pdev))
fa853dda
PS
11265 return -ENODEV;
11266
c0c050c5
MC
11267 if (version_printed++ == 0)
11268 pr_info("%s", version);
11269
11270 max_irqs = bnxt_get_max_irq(pdev);
11271 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
11272 if (!dev)
11273 return -ENOMEM;
11274
11275 bp = netdev_priv(dev);
9c1fabdf 11276 bnxt_set_max_func_irqs(bp, max_irqs);
c0c050c5
MC
11277
11278 if (bnxt_vf_pciid(ent->driver_data))
11279 bp->flags |= BNXT_FLAG_VF;
11280
2bcfa6f6 11281 if (pdev->msix_cap)
c0c050c5 11282 bp->flags |= BNXT_FLAG_MSIX_CAP;
c0c050c5
MC
11283
11284 rc = bnxt_init_board(pdev, dev);
11285 if (rc < 0)
11286 goto init_err_free;
11287
11288 dev->netdev_ops = &bnxt_netdev_ops;
11289 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
11290 dev->ethtool_ops = &bnxt_ethtool_ops;
c0c050c5
MC
11291 pci_set_drvdata(pdev, dev);
11292
3e8060fa
PS
11293 rc = bnxt_alloc_hwrm_resources(bp);
11294 if (rc)
17086399 11295 goto init_err_pci_clean;
3e8060fa
PS
11296
11297 mutex_init(&bp->hwrm_cmd_lock);
ba642ab7 11298 mutex_init(&bp->link_lock);
7c380918
MC
11299
11300 rc = bnxt_fw_init_one_p1(bp);
3e8060fa 11301 if (rc)
17086399 11302 goto init_err_pci_clean;
3e8060fa 11303
e38287b7
MC
11304 if (BNXT_CHIP_P5(bp))
11305 bp->flags |= BNXT_FLAG_CHIP_P5;
11306
7c380918 11307 rc = bnxt_fw_init_one_p2(bp);
3c2217a6
MC
11308 if (rc)
11309 goto init_err_pci_clean;
11310
c0c050c5
MC
11311 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
11312 NETIF_F_TSO | NETIF_F_TSO6 |
11313 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7e13318d 11314 NETIF_F_GSO_IPXIP4 |
152971ee
AD
11315 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
11316 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
3e8060fa
PS
11317 NETIF_F_RXCSUM | NETIF_F_GRO;
11318
e38287b7 11319 if (BNXT_SUPPORTS_TPA(bp))
3e8060fa 11320 dev->hw_features |= NETIF_F_LRO;
c0c050c5 11321
c0c050c5
MC
11322 dev->hw_enc_features =
11323 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
11324 NETIF_F_TSO | NETIF_F_TSO6 |
11325 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
152971ee 11326 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7e13318d 11327 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
152971ee
AD
11328 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
11329 NETIF_F_GSO_GRE_CSUM;
c0c050c5
MC
11330 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
11331 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
11332 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
e38287b7 11333 if (BNXT_SUPPORTS_TPA(bp))
1054aee8 11334 dev->hw_features |= NETIF_F_GRO_HW;
c0c050c5 11335 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
1054aee8
MC
11336 if (dev->features & NETIF_F_GRO_HW)
11337 dev->features &= ~NETIF_F_LRO;
c0c050c5
MC
11338 dev->priv_flags |= IFF_UNICAST_FLT;
11339
11340#ifdef CONFIG_BNXT_SRIOV
11341 init_waitqueue_head(&bp->sriov_cfg_wait);
4ab0c6a8 11342 mutex_init(&bp->sriov_lock);
c0c050c5 11343#endif
e38287b7
MC
11344 if (BNXT_SUPPORTS_TPA(bp)) {
11345 bp->gro_func = bnxt_gro_func_5730x;
67912c36 11346 if (BNXT_CHIP_P4(bp))
e38287b7 11347 bp->gro_func = bnxt_gro_func_5731x;
67912c36
MC
11348 else if (BNXT_CHIP_P5(bp))
11349 bp->gro_func = bnxt_gro_func_5750x;
e38287b7
MC
11350 }
11351 if (!BNXT_CHIP_P4_PLUS(bp))
434c975a 11352 bp->flags |= BNXT_FLAG_DOUBLE_DB;
309369c9 11353
a588e458
MC
11354 bp->ulp_probe = bnxt_ulp_probe;
11355
a22a6ac2
MC
11356 rc = bnxt_init_mac_addr(bp);
11357 if (rc) {
11358 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
11359 rc = -EADDRNOTAVAIL;
11360 goto init_err_pci_clean;
11361 }
c0c050c5 11362
2e9217d1
VV
11363 if (BNXT_PF(bp)) {
11364 /* Read the adapter's DSN to use as the eswitch switch_id */
11365 rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
11366 if (rc)
11367 goto init_err_pci_clean;
11368 }
567b2abe 11369
7eb9bb3a
MC
11370 /* MTU range: 60 - FW defined max */
11371 dev->min_mtu = ETH_ZLEN;
11372 dev->max_mtu = bp->max_mtu;
11373
ba642ab7 11374 rc = bnxt_probe_phy(bp, true);
d5430d31
MC
11375 if (rc)
11376 goto init_err_pci_clean;
11377
c61fb99c 11378 bnxt_set_rx_skb_mode(bp, false);
c0c050c5
MC
11379 bnxt_set_tpa_flags(bp);
11380 bnxt_set_ring_params(bp);
702c221c 11381 rc = bnxt_set_dflt_rings(bp, true);
bdbd1eb5
MC
11382 if (rc) {
11383 netdev_err(bp->dev, "Not enough rings available.\n");
11384 rc = -ENOMEM;
17086399 11385 goto init_err_pci_clean;
bdbd1eb5 11386 }
c0c050c5 11387
ba642ab7 11388 bnxt_fw_init_one_p3(bp);
2bcfa6f6 11389
c0c050c5
MC
11390 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
11391 bp->flags |= BNXT_FLAG_STRIP_VLAN;
11392
7809592d 11393 rc = bnxt_init_int_mode(bp);
c0c050c5 11394 if (rc)
17086399 11395 goto init_err_pci_clean;
c0c050c5 11396
832aed16
MC
11397 /* No TC has been set yet and rings may have been trimmed due to
11398 * limited MSIX, so we re-initialize the TX rings per TC.
11399 */
11400 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11401
c213eae8
MC
11402 if (BNXT_PF(bp)) {
11403 if (!bnxt_pf_wq) {
11404 bnxt_pf_wq =
11405 create_singlethread_workqueue("bnxt_pf_wq");
11406 if (!bnxt_pf_wq) {
11407 dev_err(&pdev->dev, "Unable to create workqueue.\n");
11408 goto init_err_pci_clean;
11409 }
11410 }
2ae7408f 11411 bnxt_init_tc(bp);
c213eae8 11412 }
2ae7408f 11413
7809592d
MC
11414 rc = register_netdev(dev);
11415 if (rc)
2ae7408f 11416 goto init_err_cleanup_tc;
7809592d 11417
4ab0c6a8
SP
11418 if (BNXT_PF(bp))
11419 bnxt_dl_register(bp);
11420
c0c050c5
MC
11421 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
11422 board_info[ent->driver_data].name,
11423 (long)pci_resource_start(pdev, 0), dev->dev_addr);
af125b75 11424 pcie_print_link_status(pdev);
90c4f788 11425
c0c050c5
MC
11426 return 0;
11427
2ae7408f
SP
11428init_err_cleanup_tc:
11429 bnxt_shutdown_tc(bp);
7809592d
MC
11430 bnxt_clear_int_mode(bp);
11431
17086399 11432init_err_pci_clean:
f9099d61 11433 bnxt_free_hwrm_short_cmd_req(bp);
a2bf74f4 11434 bnxt_free_hwrm_resources(bp);
98f04cf0
MC
11435 bnxt_free_ctx_mem(bp);
11436 kfree(bp->ctx);
11437 bp->ctx = NULL;
07f83d72
MC
11438 kfree(bp->fw_health);
11439 bp->fw_health = NULL;
17086399 11440 bnxt_cleanup_pci(bp);
c0c050c5
MC
11441
11442init_err_free:
11443 free_netdev(dev);
11444 return rc;
11445}
11446
d196ece7
MC
11447static void bnxt_shutdown(struct pci_dev *pdev)
11448{
11449 struct net_device *dev = pci_get_drvdata(pdev);
11450 struct bnxt *bp;
11451
11452 if (!dev)
11453 return;
11454
11455 rtnl_lock();
11456 bp = netdev_priv(dev);
11457 if (!bp)
11458 goto shutdown_exit;
11459
11460 if (netif_running(dev))
11461 dev_close(dev);
11462
a7f3f939
RJ
11463 bnxt_ulp_shutdown(bp);
11464
d196ece7
MC
11465 if (system_state == SYSTEM_POWER_OFF) {
11466 bnxt_clear_int_mode(bp);
c20dc142 11467 pci_disable_device(pdev);
d196ece7
MC
11468 pci_wake_from_d3(pdev, bp->wol);
11469 pci_set_power_state(pdev, PCI_D3hot);
11470 }
11471
11472shutdown_exit:
11473 rtnl_unlock();
11474}
11475
f65a2044
MC
11476#ifdef CONFIG_PM_SLEEP
11477static int bnxt_suspend(struct device *device)
11478{
f521eaa9 11479 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
11480 struct bnxt *bp = netdev_priv(dev);
11481 int rc = 0;
11482
11483 rtnl_lock();
11484 if (netif_running(dev)) {
11485 netif_device_detach(dev);
11486 rc = bnxt_close(dev);
11487 }
11488 bnxt_hwrm_func_drv_unrgtr(bp);
11489 rtnl_unlock();
11490 return rc;
11491}
11492
11493static int bnxt_resume(struct device *device)
11494{
f521eaa9 11495 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
11496 struct bnxt *bp = netdev_priv(dev);
11497 int rc = 0;
11498
11499 rtnl_lock();
11500 if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
11501 rc = -ENODEV;
11502 goto resume_exit;
11503 }
11504 rc = bnxt_hwrm_func_reset(bp);
11505 if (rc) {
11506 rc = -EBUSY;
11507 goto resume_exit;
11508 }
11509 bnxt_get_wol_settings(bp);
11510 if (netif_running(dev)) {
11511 rc = bnxt_open(dev);
11512 if (!rc)
11513 netif_device_attach(dev);
11514 }
11515
11516resume_exit:
11517 rtnl_unlock();
11518 return rc;
11519}
11520
11521static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
11522#define BNXT_PM_OPS (&bnxt_pm_ops)
11523
11524#else
11525
11526#define BNXT_PM_OPS NULL
11527
11528#endif /* CONFIG_PM_SLEEP */
11529
6316ea6d
SB
11530/**
11531 * bnxt_io_error_detected - called when PCI error is detected
11532 * @pdev: Pointer to PCI device
11533 * @state: The current pci connection state
11534 *
11535 * This function is called after a PCI bus error affecting
11536 * this device has been detected.
11537 */
11538static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
11539 pci_channel_state_t state)
11540{
11541 struct net_device *netdev = pci_get_drvdata(pdev);
a588e458 11542 struct bnxt *bp = netdev_priv(netdev);
6316ea6d
SB
11543
11544 netdev_info(netdev, "PCI I/O error detected\n");
11545
11546 rtnl_lock();
11547 netif_device_detach(netdev);
11548
a588e458
MC
11549 bnxt_ulp_stop(bp);
11550
6316ea6d
SB
11551 if (state == pci_channel_io_perm_failure) {
11552 rtnl_unlock();
11553 return PCI_ERS_RESULT_DISCONNECT;
11554 }
11555
11556 if (netif_running(netdev))
11557 bnxt_close(netdev);
11558
11559 pci_disable_device(pdev);
11560 rtnl_unlock();
11561
11562 /* Request a slot reset. */
11563 return PCI_ERS_RESULT_NEED_RESET;
11564}
11565
11566/**
11567 * bnxt_io_slot_reset - called after the pci bus has been reset.
11568 * @pdev: Pointer to PCI device
11569 *
11570 * Restart the card from scratch, as if from a cold-boot.
11571 * At this point, the card has experienced a hard reset,
11572 * followed by fixups by BIOS, and has its config space
11573 * set up identically to what it was at cold boot.
11574 */
11575static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
11576{
11577 struct net_device *netdev = pci_get_drvdata(pdev);
11578 struct bnxt *bp = netdev_priv(netdev);
11579 int err = 0;
11580 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
11581
11582 netdev_info(bp->dev, "PCI Slot Reset\n");
11583
11584 rtnl_lock();
11585
11586 if (pci_enable_device(pdev)) {
11587 dev_err(&pdev->dev,
11588 "Cannot re-enable PCI device after reset.\n");
11589 } else {
11590 pci_set_master(pdev);
11591
aa8ed021
MC
11592 err = bnxt_hwrm_func_reset(bp);
11593 if (!err && netif_running(netdev))
6316ea6d
SB
11594 err = bnxt_open(netdev);
11595
a588e458 11596 if (!err) {
6316ea6d 11597 result = PCI_ERS_RESULT_RECOVERED;
a588e458
MC
11598 bnxt_ulp_start(bp);
11599 }
6316ea6d
SB
11600 }
11601
11602 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
11603 dev_close(netdev);
11604
11605 rtnl_unlock();
11606
6316ea6d
SB
11607 return result;
11608}
11609
11610/**
11611 * bnxt_io_resume - called when traffic can start flowing again.
11612 * @pdev: Pointer to PCI device
11613 *
11614 * This callback is called when the error recovery driver tells
11615 * us that it's OK to resume normal operation.
11616 */
11617static void bnxt_io_resume(struct pci_dev *pdev)
11618{
11619 struct net_device *netdev = pci_get_drvdata(pdev);
11620
11621 rtnl_lock();
11622
11623 netif_device_attach(netdev);
11624
11625 rtnl_unlock();
11626}
11627
11628static const struct pci_error_handlers bnxt_err_handler = {
11629 .error_detected = bnxt_io_error_detected,
11630 .slot_reset = bnxt_io_slot_reset,
11631 .resume = bnxt_io_resume
11632};
11633
c0c050c5
MC
11634static struct pci_driver bnxt_pci_driver = {
11635 .name = DRV_MODULE_NAME,
11636 .id_table = bnxt_pci_tbl,
11637 .probe = bnxt_init_one,
11638 .remove = bnxt_remove_one,
d196ece7 11639 .shutdown = bnxt_shutdown,
f65a2044 11640 .driver.pm = BNXT_PM_OPS,
6316ea6d 11641 .err_handler = &bnxt_err_handler,
c0c050c5
MC
11642#if defined(CONFIG_BNXT_SRIOV)
11643 .sriov_configure = bnxt_sriov_configure,
11644#endif
11645};
11646
c213eae8
MC
11647static int __init bnxt_init(void)
11648{
cabfb09d 11649 bnxt_debug_init();
c213eae8
MC
11650 return pci_register_driver(&bnxt_pci_driver);
11651}
11652
11653static void __exit bnxt_exit(void)
11654{
11655 pci_unregister_driver(&bnxt_pci_driver);
11656 if (bnxt_pf_wq)
11657 destroy_workqueue(bnxt_pf_wq);
cabfb09d 11658 bnxt_debug_exit();
c213eae8
MC
11659}
11660
11661module_init(bnxt_init);
11662module_exit(bnxt_exit);