/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM57504,
	BCM57502,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
	NETXTREME_E_P5_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};

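/* PCI IDs handled by this driver.  The .driver_data field holds a board_idx
 * value that selects the device name from board_info[] above and lets
 * bnxt_vf_pciid() identify virtual function devices.
 */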
static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}

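/* Doorbell helpers.  P5 (57500-series) chips use the 64-bit db_key64
 * doorbell format and distinguish notification queue (NQ) doorbells from
 * completion queue (CQ) doorbells; older chips write the legacy 32-bit
 * completion ring doorbell with the consumer index instead.
 */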
#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

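/* Main transmit routine.  A packet that starts on an empty ring and fits
 * within bp->tx_push_thresh is pushed: its BDs and data are copied straight
 * into the doorbell BAR with __iowrite64_copy().  All other packets are
 * DMA-mapped and described with long TX BDs, and the doorbell is only
 * written once netdev_xmit_more() reports no further packets are pending
 * (or the queue is stopped).
 */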
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
		cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

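/* TX completion handler: unmaps and frees nr_pkts completed packets, credits
 * the bytes back to the BQL queue, then re-wakes the TX queue if it was
 * stopped and enough descriptors are free again.  The smp_mb() pairs with
 * the barrier in bnxt_start_xmit() so a concurrent queue stop is not missed.
 */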
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

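/* RX buffer allocation helpers.  In page mode (BNXT_RX_PAGE_MODE) receive
 * buffers come from the ring's page_pool and a full page is DMA-mapped;
 * otherwise a kmalloc'ed buffer of bp->rx_buf_size is used and only
 * bp->rx_buf_use_size bytes are mapped.
 */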
c61fb99c 671static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
322b87ca 672 struct bnxt_rx_ring_info *rxr,
c61fb99c
MC
673 gfp_t gfp)
674{
675 struct device *dev = &bp->pdev->dev;
676 struct page *page;
677
322b87ca 678 page = page_pool_dev_alloc_pages(rxr->page_pool);
c61fb99c
MC
679 if (!page)
680 return NULL;
681
c519fe9a
SN
682 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
683 DMA_ATTR_WEAK_ORDERING);
c61fb99c 684 if (dma_mapping_error(dev, *mapping)) {
322b87ca 685 page_pool_recycle_direct(rxr->page_pool, page);
c61fb99c
MC
686 return NULL;
687 }
688 *mapping += bp->rx_dma_offset;
689 return page;
690}
691
c0c050c5
MC
692static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
693 gfp_t gfp)
694{
695 u8 *data;
696 struct pci_dev *pdev = bp->pdev;
697
698 data = kmalloc(bp->rx_buf_size, gfp);
699 if (!data)
700 return NULL;
701
c519fe9a
SN
702 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
703 bp->rx_buf_use_size, bp->rx_dir,
704 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
705
706 if (dma_mapping_error(&pdev->dev, *mapping)) {
707 kfree(data);
708 data = NULL;
709 }
710 return data;
711}
712
38413406
MC
713int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
714 u16 prod, gfp_t gfp)
c0c050c5
MC
715{
716 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
717 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
c0c050c5
MC
718 dma_addr_t mapping;
719
c61fb99c 720 if (BNXT_RX_PAGE_MODE(bp)) {
322b87ca
AG
721 struct page *page =
722 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
c0c050c5 723
c61fb99c
MC
724 if (!page)
725 return -ENOMEM;
726
727 rx_buf->data = page;
728 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
729 } else {
730 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
731
732 if (!data)
733 return -ENOMEM;
734
735 rx_buf->data = data;
736 rx_buf->data_ptr = data + bp->rx_offset;
737 }
11cd119d 738 rx_buf->mapping = mapping;
c0c050c5
MC
739
740 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
c0c050c5
MC
741 return 0;
742}
743
c6d30e83 744void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
c0c050c5
MC
745{
746 u16 prod = rxr->rx_prod;
747 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
748 struct rx_bd *cons_bd, *prod_bd;
749
750 prod_rx_buf = &rxr->rx_buf_ring[prod];
751 cons_rx_buf = &rxr->rx_buf_ring[cons];
752
753 prod_rx_buf->data = data;
6bb19474 754 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
c0c050c5 755
11cd119d 756 prod_rx_buf->mapping = cons_rx_buf->mapping;
c0c050c5
MC
757
758 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
759 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
760
761 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
762}
763
764static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
765{
766 u16 next, max = rxr->rx_agg_bmap_size;
767
768 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
769 if (next >= max)
770 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
771 return next;
772}
773
774static inline int bnxt_alloc_rx_page(struct bnxt *bp,
775 struct bnxt_rx_ring_info *rxr,
776 u16 prod, gfp_t gfp)
777{
778 struct rx_bd *rxbd =
779 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
780 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
781 struct pci_dev *pdev = bp->pdev;
782 struct page *page;
783 dma_addr_t mapping;
784 u16 sw_prod = rxr->rx_sw_agg_prod;
89d0a06c 785 unsigned int offset = 0;
c0c050c5 786
89d0a06c
MC
787 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
788 page = rxr->rx_page;
789 if (!page) {
790 page = alloc_page(gfp);
791 if (!page)
792 return -ENOMEM;
793 rxr->rx_page = page;
794 rxr->rx_page_offset = 0;
795 }
796 offset = rxr->rx_page_offset;
797 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
798 if (rxr->rx_page_offset == PAGE_SIZE)
799 rxr->rx_page = NULL;
800 else
801 get_page(page);
802 } else {
803 page = alloc_page(gfp);
804 if (!page)
805 return -ENOMEM;
806 }
c0c050c5 807
c519fe9a
SN
808 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
809 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
810 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
811 if (dma_mapping_error(&pdev->dev, mapping)) {
812 __free_page(page);
813 return -EIO;
814 }
815
816 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
817 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
818
819 __set_bit(sw_prod, rxr->rx_agg_bmap);
820 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
821 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
822
823 rx_agg_buf->page = page;
89d0a06c 824 rx_agg_buf->offset = offset;
c0c050c5
MC
825 rx_agg_buf->mapping = mapping;
826 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
827 rxbd->rx_bd_opaque = sw_prod;
828 return 0;
829}
830
4a228a3a
MC
831static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
832 struct bnxt_cp_ring_info *cpr,
833 u16 cp_cons, u16 curr)
834{
835 struct rx_agg_cmp *agg;
836
837 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
838 agg = (struct rx_agg_cmp *)
839 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
840 return agg;
841}
842
bfcd8d79
MC
843static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
844 struct bnxt_rx_ring_info *rxr,
845 u16 agg_id, u16 curr)
846{
847 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
848
849 return &tpa_info->agg_arr[curr];
850}
851
4a228a3a
MC
852static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
853 u16 start, u32 agg_bufs, bool tpa)
c0c050c5 854{
e44758b7 855 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5 856 struct bnxt *bp = bnapi->bp;
b6ab4b01 857 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
858 u16 prod = rxr->rx_agg_prod;
859 u16 sw_prod = rxr->rx_sw_agg_prod;
bfcd8d79 860 bool p5_tpa = false;
c0c050c5
MC
861 u32 i;
862
bfcd8d79
MC
863 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
864 p5_tpa = true;
865
c0c050c5
MC
866 for (i = 0; i < agg_bufs; i++) {
867 u16 cons;
868 struct rx_agg_cmp *agg;
869 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
870 struct rx_bd *prod_bd;
871 struct page *page;
872
bfcd8d79
MC
873 if (p5_tpa)
874 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
875 else
876 agg = bnxt_get_agg(bp, cpr, idx, start + i);
c0c050c5
MC
877 cons = agg->rx_agg_cmp_opaque;
878 __clear_bit(cons, rxr->rx_agg_bmap);
879
880 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
881 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
882
883 __set_bit(sw_prod, rxr->rx_agg_bmap);
884 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
885 cons_rx_buf = &rxr->rx_agg_ring[cons];
886
887 /* It is possible for sw_prod to be equal to cons, so
888 * set cons_rx_buf->page to NULL first.
889 */
890 page = cons_rx_buf->page;
891 cons_rx_buf->page = NULL;
892 prod_rx_buf->page = page;
89d0a06c 893 prod_rx_buf->offset = cons_rx_buf->offset;
c0c050c5
MC
894
895 prod_rx_buf->mapping = cons_rx_buf->mapping;
896
897 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
898
899 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
900 prod_bd->rx_bd_opaque = sw_prod;
901
902 prod = NEXT_RX_AGG(prod);
903 sw_prod = NEXT_RX_AGG(sw_prod);
c0c050c5
MC
904 }
905 rxr->rx_agg_prod = prod;
906 rxr->rx_sw_agg_prod = sw_prod;
907}
908
c61fb99c
MC
909static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
910 struct bnxt_rx_ring_info *rxr,
911 u16 cons, void *data, u8 *data_ptr,
912 dma_addr_t dma_addr,
913 unsigned int offset_and_len)
914{
915 unsigned int payload = offset_and_len >> 16;
916 unsigned int len = offset_and_len & 0xffff;
d7840976 917 skb_frag_t *frag;
c61fb99c
MC
918 struct page *page = data;
919 u16 prod = rxr->rx_prod;
920 struct sk_buff *skb;
921 int off, err;
922
923 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
924 if (unlikely(err)) {
925 bnxt_reuse_rx_data(rxr, cons, data);
926 return NULL;
927 }
928 dma_addr -= bp->rx_dma_offset;
c519fe9a
SN
929 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
930 DMA_ATTR_WEAK_ORDERING);
c61fb99c
MC
931
932 if (unlikely(!payload))
c43f1255 933 payload = eth_get_headlen(bp->dev, data_ptr, len);
c61fb99c
MC
934
935 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
936 if (!skb) {
937 __free_page(page);
938 return NULL;
939 }
940
941 off = (void *)data_ptr - page_address(page);
942 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
943 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
944 payload + NET_IP_ALIGN);
945
946 frag = &skb_shinfo(skb)->frags[0];
947 skb_frag_size_sub(frag, payload);
948 frag->page_offset += payload;
949 skb->data_len -= payload;
950 skb->tail += payload;
951
952 return skb;
953}
954
c0c050c5
MC
955static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
956 struct bnxt_rx_ring_info *rxr, u16 cons,
6bb19474
MC
957 void *data, u8 *data_ptr,
958 dma_addr_t dma_addr,
959 unsigned int offset_and_len)
c0c050c5 960{
6bb19474 961 u16 prod = rxr->rx_prod;
c0c050c5 962 struct sk_buff *skb;
6bb19474 963 int err;
c0c050c5
MC
964
965 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
966 if (unlikely(err)) {
967 bnxt_reuse_rx_data(rxr, cons, data);
968 return NULL;
969 }
970
971 skb = build_skb(data, 0);
c519fe9a
SN
972 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
973 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
974 if (!skb) {
975 kfree(data);
976 return NULL;
977 }
978
b3dba77c 979 skb_reserve(skb, bp->rx_offset);
6bb19474 980 skb_put(skb, offset_and_len & 0xffff);
c0c050c5
MC
981 return skb;
982}
983
e44758b7
MC
984static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
985 struct bnxt_cp_ring_info *cpr,
4a228a3a
MC
986 struct sk_buff *skb, u16 idx,
987 u32 agg_bufs, bool tpa)
c0c050c5 988{
e44758b7 989 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5 990 struct pci_dev *pdev = bp->pdev;
b6ab4b01 991 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 992 u16 prod = rxr->rx_agg_prod;
bfcd8d79 993 bool p5_tpa = false;
c0c050c5
MC
994 u32 i;
995
bfcd8d79
MC
996 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
997 p5_tpa = true;
998
c0c050c5
MC
999 for (i = 0; i < agg_bufs; i++) {
1000 u16 cons, frag_len;
1001 struct rx_agg_cmp *agg;
1002 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1003 struct page *page;
1004 dma_addr_t mapping;
1005
bfcd8d79
MC
1006 if (p5_tpa)
1007 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1008 else
1009 agg = bnxt_get_agg(bp, cpr, idx, i);
c0c050c5
MC
1010 cons = agg->rx_agg_cmp_opaque;
1011 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1012 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1013
1014 cons_rx_buf = &rxr->rx_agg_ring[cons];
89d0a06c
MC
1015 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1016 cons_rx_buf->offset, frag_len);
c0c050c5
MC
1017 __clear_bit(cons, rxr->rx_agg_bmap);
1018
1019 /* It is possible for bnxt_alloc_rx_page() to allocate
1020 * a sw_prod index that equals the cons index, so we
1021 * need to clear the cons entry now.
1022 */
11cd119d 1023 mapping = cons_rx_buf->mapping;
c0c050c5
MC
1024 page = cons_rx_buf->page;
1025 cons_rx_buf->page = NULL;
1026
1027 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1028 struct skb_shared_info *shinfo;
1029 unsigned int nr_frags;
1030
1031 shinfo = skb_shinfo(skb);
1032 nr_frags = --shinfo->nr_frags;
1033 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1034
1035 dev_kfree_skb(skb);
1036
1037 cons_rx_buf->page = page;
1038
1039 /* Update prod since possibly some pages have been
1040 * allocated already.
1041 */
1042 rxr->rx_agg_prod = prod;
4a228a3a 1043 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
c0c050c5
MC
1044 return NULL;
1045 }
1046
c519fe9a
SN
1047 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1048 PCI_DMA_FROMDEVICE,
1049 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1050
1051 skb->data_len += frag_len;
1052 skb->len += frag_len;
1053 skb->truesize += PAGE_SIZE;
1054
1055 prod = NEXT_RX_AGG(prod);
c0c050c5
MC
1056 }
1057 rxr->rx_agg_prod = prod;
1058 return skb;
1059}
1060
1061static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1062 u8 agg_bufs, u32 *raw_cons)
1063{
1064 u16 last;
1065 struct rx_agg_cmp *agg;
1066
1067 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1068 last = RING_CMP(*raw_cons);
1069 agg = (struct rx_agg_cmp *)
1070 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1071 return RX_AGG_CMP_VALID(agg, *raw_cons);
1072}
1073
1074static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1075 unsigned int len,
1076 dma_addr_t mapping)
1077{
1078 struct bnxt *bp = bnapi->bp;
1079 struct pci_dev *pdev = bp->pdev;
1080 struct sk_buff *skb;
1081
1082 skb = napi_alloc_skb(&bnapi->napi, len);
1083 if (!skb)
1084 return NULL;
1085
745fc05c
MC
1086 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1087 bp->rx_dir);
c0c050c5 1088
6bb19474
MC
1089 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1090 len + NET_IP_ALIGN);
c0c050c5 1091
745fc05c
MC
1092 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1093 bp->rx_dir);
c0c050c5
MC
1094
1095 skb_put(skb, len);
1096 return skb;
1097}
1098
e44758b7 1099static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
fa7e2812
MC
1100 u32 *raw_cons, void *cmp)
1101{
fa7e2812
MC
1102 struct rx_cmp *rxcmp = cmp;
1103 u32 tmp_raw_cons = *raw_cons;
1104 u8 cmp_type, agg_bufs = 0;
1105
1106 cmp_type = RX_CMP_TYPE(rxcmp);
1107
1108 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1109 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1110 RX_CMP_AGG_BUFS) >>
1111 RX_CMP_AGG_BUFS_SHIFT;
1112 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1113 struct rx_tpa_end_cmp *tpa_end = cmp;
1114
bfcd8d79
MC
1115 if (bp->flags & BNXT_FLAG_CHIP_P5)
1116 return 0;
1117
4a228a3a 1118 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
fa7e2812
MC
1119 }
1120
1121 if (agg_bufs) {
1122 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1123 return -EBUSY;
1124 }
1125 *raw_cons = tmp_raw_cons;
1126 return 0;
1127}
1128
c213eae8
MC
1129static void bnxt_queue_sp_work(struct bnxt *bp)
1130{
1131 if (BNXT_PF(bp))
1132 queue_work(bnxt_pf_wq, &bp->sp_task);
1133 else
1134 schedule_work(&bp->sp_task);
1135}
1136
1137static void bnxt_cancel_sp_work(struct bnxt *bp)
1138{
1139 if (BNXT_PF(bp))
1140 flush_workqueue(bnxt_pf_wq);
1141 else
1142 cancel_work_sync(&bp->sp_task);
1143}
1144
fa7e2812
MC
1145static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1146{
1147 if (!rxr->bnapi->in_reset) {
1148 rxr->bnapi->in_reset = true;
1149 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 1150 bnxt_queue_sp_work(bp);
fa7e2812
MC
1151 }
1152 rxr->rx_next_cons = 0xffff;
1153}
1154
ec4d8e7c
MC
1155static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1156{
1157 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1158 u16 idx = agg_id & MAX_TPA_P5_MASK;
1159
1160 if (test_bit(idx, map->agg_idx_bmap))
1161 idx = find_first_zero_bit(map->agg_idx_bmap,
1162 BNXT_AGG_IDX_BMAP_SIZE);
1163 __set_bit(idx, map->agg_idx_bmap);
1164 map->agg_id_tbl[agg_id] = idx;
1165 return idx;
1166}
1167
1168static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1169{
1170 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1171
1172 __clear_bit(idx, map->agg_idx_bmap);
1173}
1174
1175static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1176{
1177 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1178
1179 return map->agg_id_tbl[agg_id];
1180}
1181
c0c050c5
MC
1182static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1183 struct rx_tpa_start_cmp *tpa_start,
1184 struct rx_tpa_start_cmp_ext *tpa_start1)
1185{
c0c050c5 1186 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
bfcd8d79
MC
1187 struct bnxt_tpa_info *tpa_info;
1188 u16 cons, prod, agg_id;
c0c050c5
MC
1189 struct rx_bd *prod_bd;
1190 dma_addr_t mapping;
1191
ec4d8e7c 1192 if (bp->flags & BNXT_FLAG_CHIP_P5) {
bfcd8d79 1193 agg_id = TPA_START_AGG_ID_P5(tpa_start);
ec4d8e7c
MC
1194 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1195 } else {
bfcd8d79 1196 agg_id = TPA_START_AGG_ID(tpa_start);
ec4d8e7c 1197 }
c0c050c5
MC
1198 cons = tpa_start->rx_tpa_start_cmp_opaque;
1199 prod = rxr->rx_prod;
1200 cons_rx_buf = &rxr->rx_buf_ring[cons];
1201 prod_rx_buf = &rxr->rx_buf_ring[prod];
1202 tpa_info = &rxr->rx_tpa[agg_id];
1203
bfcd8d79
MC
1204 if (unlikely(cons != rxr->rx_next_cons ||
1205 TPA_START_ERROR(tpa_start))) {
1206 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1207 cons, rxr->rx_next_cons,
1208 TPA_START_ERROR_CODE(tpa_start1));
fa7e2812
MC
1209 bnxt_sched_reset(bp, rxr);
1210 return;
1211 }
ee5c7fb3
SP
1212 /* Store cfa_code in tpa_info to use in tpa_end
1213 * completion processing.
1214 */
1215 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
c0c050c5 1216 prod_rx_buf->data = tpa_info->data;
6bb19474 1217 prod_rx_buf->data_ptr = tpa_info->data_ptr;
c0c050c5
MC
1218
1219 mapping = tpa_info->mapping;
11cd119d 1220 prod_rx_buf->mapping = mapping;
c0c050c5
MC
1221
1222 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1223
1224 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1225
1226 tpa_info->data = cons_rx_buf->data;
6bb19474 1227 tpa_info->data_ptr = cons_rx_buf->data_ptr;
c0c050c5 1228 cons_rx_buf->data = NULL;
11cd119d 1229 tpa_info->mapping = cons_rx_buf->mapping;
c0c050c5
MC
1230
1231 tpa_info->len =
1232 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1233 RX_TPA_START_CMP_LEN_SHIFT;
1234 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1235 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1236
1237 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1238 tpa_info->gso_type = SKB_GSO_TCPV4;
1239 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
50f011b6 1240 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
c0c050c5
MC
1241 tpa_info->gso_type = SKB_GSO_TCPV6;
1242 tpa_info->rss_hash =
1243 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1244 } else {
1245 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1246 tpa_info->gso_type = 0;
1247 if (netif_msg_rx_err(bp))
1248 netdev_warn(bp->dev, "TPA packet without valid hash\n");
1249 }
1250 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1251 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
94758f8d 1252 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
bfcd8d79 1253 tpa_info->agg_count = 0;
c0c050c5
MC
1254
1255 rxr->rx_prod = NEXT_RX(prod);
1256 cons = NEXT_RX(cons);
376a5b86 1257 rxr->rx_next_cons = NEXT_RX(cons);
c0c050c5
MC
1258 cons_rx_buf = &rxr->rx_buf_ring[cons];
1259
1260 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1261 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1262 cons_rx_buf->data = NULL;
1263}
1264
4a228a3a 1265static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
c0c050c5
MC
1266{
1267 if (agg_bufs)
4a228a3a 1268 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
c0c050c5
MC
1269}
1270
bee5a188
MC
1271#ifdef CONFIG_INET
1272static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1273{
1274 struct udphdr *uh = NULL;
1275
1276 if (ip_proto == htons(ETH_P_IP)) {
1277 struct iphdr *iph = (struct iphdr *)skb->data;
1278
1279 if (iph->protocol == IPPROTO_UDP)
1280 uh = (struct udphdr *)(iph + 1);
1281 } else {
1282 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1283
1284 if (iph->nexthdr == IPPROTO_UDP)
1285 uh = (struct udphdr *)(iph + 1);
1286 }
1287 if (uh) {
1288 if (uh->check)
1289 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1290 else
1291 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1292 }
1293}
1294#endif
1295
94758f8d
MC
1296static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1297 int payload_off, int tcp_ts,
1298 struct sk_buff *skb)
1299{
1300#ifdef CONFIG_INET
1301 struct tcphdr *th;
1302 int len, nw_off;
1303 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1304 u32 hdr_info = tpa_info->hdr_info;
1305 bool loopback = false;
1306
1307 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1308 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1309 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1310
1311 /* If the packet is an internal loopback packet, the offsets will
1312 * have an extra 4 bytes.
1313 */
1314 if (inner_mac_off == 4) {
1315 loopback = true;
1316 } else if (inner_mac_off > 4) {
1317 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1318 ETH_HLEN - 2));
1319
1320 /* We only support inner iPv4/ipv6. If we don't see the
1321 * correct protocol ID, it must be a loopback packet where
1322 * the offsets are off by 4.
1323 */
09a7636a 1324 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
94758f8d
MC
1325 loopback = true;
1326 }
1327 if (loopback) {
1328 /* internal loopback packet, subtract all offsets by 4 */
1329 inner_ip_off -= 4;
1330 inner_mac_off -= 4;
1331 outer_ip_off -= 4;
1332 }
1333
1334 nw_off = inner_ip_off - ETH_HLEN;
1335 skb_set_network_header(skb, nw_off);
1336 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1337 struct ipv6hdr *iph = ipv6_hdr(skb);
1338
1339 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1340 len = skb->len - skb_transport_offset(skb);
1341 th = tcp_hdr(skb);
1342 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1343 } else {
1344 struct iphdr *iph = ip_hdr(skb);
1345
1346 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1347 len = skb->len - skb_transport_offset(skb);
1348 th = tcp_hdr(skb);
1349 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1350 }
1351
1352 if (inner_mac_off) { /* tunnel */
94758f8d
MC
1353 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1354 ETH_HLEN - 2));
1355
bee5a188 1356 bnxt_gro_tunnel(skb, proto);
94758f8d
MC
1357 }
1358#endif
1359 return skb;
1360}
1361
67912c36
MC
1362static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1363 int payload_off, int tcp_ts,
1364 struct sk_buff *skb)
1365{
1366#ifdef CONFIG_INET
1367 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1368 u32 hdr_info = tpa_info->hdr_info;
1369 int iphdr_len, nw_off;
1370
1371 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1372 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1373 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1374
1375 nw_off = inner_ip_off - ETH_HLEN;
1376 skb_set_network_header(skb, nw_off);
1377 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1378 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1379 skb_set_transport_header(skb, nw_off + iphdr_len);
1380
1381 if (inner_mac_off) { /* tunnel */
1382 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1383 ETH_HLEN - 2));
1384
1385 bnxt_gro_tunnel(skb, proto);
1386 }
1387#endif
1388 return skb;
1389}
1390
c0c050c5
MC
1391#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1392#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1393
309369c9
MC
1394static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1395 int payload_off, int tcp_ts,
c0c050c5
MC
1396 struct sk_buff *skb)
1397{
d1611c3a 1398#ifdef CONFIG_INET
c0c050c5 1399 struct tcphdr *th;
719ca811 1400 int len, nw_off, tcp_opt_len = 0;
27e24189 1401
309369c9 1402 if (tcp_ts)
c0c050c5
MC
1403 tcp_opt_len = 12;
1404
c0c050c5
MC
1405 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1406 struct iphdr *iph;
1407
1408 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1409 ETH_HLEN;
1410 skb_set_network_header(skb, nw_off);
1411 iph = ip_hdr(skb);
1412 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1413 len = skb->len - skb_transport_offset(skb);
1414 th = tcp_hdr(skb);
1415 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1416 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1417 struct ipv6hdr *iph;
1418
1419 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1420 ETH_HLEN;
1421 skb_set_network_header(skb, nw_off);
1422 iph = ipv6_hdr(skb);
1423 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1424 len = skb->len - skb_transport_offset(skb);
1425 th = tcp_hdr(skb);
1426 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1427 } else {
1428 dev_kfree_skb_any(skb);
1429 return NULL;
1430 }
c0c050c5 1431
bee5a188
MC
1432 if (nw_off) /* tunnel */
1433 bnxt_gro_tunnel(skb, skb->protocol);
c0c050c5
MC
1434#endif
1435 return skb;
1436}
1437
309369c9
MC
1438static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1439 struct bnxt_tpa_info *tpa_info,
1440 struct rx_tpa_end_cmp *tpa_end,
1441 struct rx_tpa_end_cmp_ext *tpa_end1,
1442 struct sk_buff *skb)
1443{
1444#ifdef CONFIG_INET
1445 int payload_off;
1446 u16 segs;
1447
1448 segs = TPA_END_TPA_SEGS(tpa_end);
1449 if (segs == 1)
1450 return skb;
1451
1452 NAPI_GRO_CB(skb)->count = segs;
1453 skb_shinfo(skb)->gso_size =
1454 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1455 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
bfcd8d79
MC
1456 if (bp->flags & BNXT_FLAG_CHIP_P5)
1457 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1458 else
1459 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
309369c9 1460 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
5910906c
MC
1461 if (likely(skb))
1462 tcp_gro_complete(skb);
309369c9
MC
1463#endif
1464 return skb;
1465}
1466
/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}
1477
c0c050c5 1478static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
e44758b7 1479 struct bnxt_cp_ring_info *cpr,
c0c050c5
MC
1480 u32 *raw_cons,
1481 struct rx_tpa_end_cmp *tpa_end,
1482 struct rx_tpa_end_cmp_ext *tpa_end1,
4e5dbbda 1483 u8 *event)
c0c050c5 1484{
e44758b7 1485 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1486 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6bb19474 1487 u8 *data_ptr, agg_bufs;
c0c050c5
MC
1488 unsigned int len;
1489 struct bnxt_tpa_info *tpa_info;
1490 dma_addr_t mapping;
1491 struct sk_buff *skb;
bfcd8d79 1492 u16 idx = 0, agg_id;
6bb19474 1493 void *data;
bfcd8d79 1494 bool gro;
c0c050c5 1495
fa7e2812 1496 if (unlikely(bnapi->in_reset)) {
e44758b7 1497 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
fa7e2812
MC
1498
1499 if (rc < 0)
1500 return ERR_PTR(-EBUSY);
1501 return NULL;
1502 }
1503
bfcd8d79
MC
1504 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1505 agg_id = TPA_END_AGG_ID_P5(tpa_end);
ec4d8e7c 1506 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
bfcd8d79
MC
1507 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1508 tpa_info = &rxr->rx_tpa[agg_id];
1509 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1510 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1511 agg_bufs, tpa_info->agg_count);
1512 agg_bufs = tpa_info->agg_count;
1513 }
1514 tpa_info->agg_count = 0;
1515 *event |= BNXT_AGG_EVENT;
ec4d8e7c 1516 bnxt_free_agg_idx(rxr, agg_id);
bfcd8d79
MC
1517 idx = agg_id;
1518 gro = !!(bp->flags & BNXT_FLAG_GRO);
1519 } else {
1520 agg_id = TPA_END_AGG_ID(tpa_end);
1521 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1522 tpa_info = &rxr->rx_tpa[agg_id];
1523 idx = RING_CMP(*raw_cons);
1524 if (agg_bufs) {
1525 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1526 return ERR_PTR(-EBUSY);
1527
1528 *event |= BNXT_AGG_EVENT;
1529 idx = NEXT_CMP(idx);
1530 }
1531 gro = !!TPA_END_GRO(tpa_end);
1532 }
c0c050c5 1533 data = tpa_info->data;
6bb19474
MC
1534 data_ptr = tpa_info->data_ptr;
1535 prefetch(data_ptr);
c0c050c5
MC
1536 len = tpa_info->len;
1537 mapping = tpa_info->mapping;
1538
69c149e2 1539 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
4a228a3a 1540 bnxt_abort_tpa(cpr, idx, agg_bufs);
69c149e2
MC
1541 if (agg_bufs > MAX_SKB_FRAGS)
1542 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1543 agg_bufs, (int)MAX_SKB_FRAGS);
c0c050c5
MC
1544 return NULL;
1545 }
1546
1547 if (len <= bp->rx_copy_thresh) {
6bb19474 1548 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
c0c050c5 1549 if (!skb) {
4a228a3a 1550 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1551 return NULL;
1552 }
1553 } else {
1554 u8 *new_data;
1555 dma_addr_t new_mapping;
1556
1557 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1558 if (!new_data) {
4a228a3a 1559 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1560 return NULL;
1561 }
1562
1563 tpa_info->data = new_data;
b3dba77c 1564 tpa_info->data_ptr = new_data + bp->rx_offset;
c0c050c5
MC
1565 tpa_info->mapping = new_mapping;
1566
1567 skb = build_skb(data, 0);
c519fe9a
SN
1568 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1569 bp->rx_buf_use_size, bp->rx_dir,
1570 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1571
1572 if (!skb) {
1573 kfree(data);
4a228a3a 1574 bnxt_abort_tpa(cpr, idx, agg_bufs);
c0c050c5
MC
1575 return NULL;
1576 }
b3dba77c 1577 skb_reserve(skb, bp->rx_offset);
c0c050c5
MC
1578 skb_put(skb, len);
1579 }
1580
1581 if (agg_bufs) {
4a228a3a 1582 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
c0c050c5
MC
1583 if (!skb) {
1584 /* Page reuse already handled by bnxt_rx_pages(). */
1585 return NULL;
1586 }
1587 }
ee5c7fb3
SP
1588
1589 skb->protocol =
1590 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
c0c050c5
MC
1591
1592 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1593 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1594
8852ddb4
MC
1595 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1596 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
c0c050c5
MC
1597 u16 vlan_proto = tpa_info->metadata >>
1598 RX_CMP_FLAGS2_METADATA_TPID_SFT;
ed7bc602 1599 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
c0c050c5 1600
8852ddb4 1601 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
c0c050c5
MC
1602 }
1603
1604 skb_checksum_none_assert(skb);
1605 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1606 skb->ip_summed = CHECKSUM_UNNECESSARY;
1607 skb->csum_level =
1608 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1609 }
1610
bfcd8d79 1611 if (gro)
309369c9 1612 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
c0c050c5
MC
1613
1614 return skb;
1615}
1616
8fe88ce7
MC
1617static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1618 struct rx_agg_cmp *rx_agg)
1619{
1620 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1621 struct bnxt_tpa_info *tpa_info;
1622
ec4d8e7c 1623 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
8fe88ce7
MC
1624 tpa_info = &rxr->rx_tpa[agg_id];
1625 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1626 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1627}
1628
ee5c7fb3
SP
1629static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1630 struct sk_buff *skb)
1631{
1632 if (skb->dev != bp->dev) {
1633 /* this packet belongs to a vf-rep */
1634 bnxt_vf_rep_rx(bp, skb);
1635 return;
1636 }
1637 skb_record_rx_queue(skb, bnapi->index);
1638 napi_gro_receive(&bnapi->napi, skb);
1639}
1640
c0c050c5
MC
1641/* returns the following:
1642 * 1 - 1 packet successfully received
1643 * 0 - successful TPA_START, packet not completed yet
1644 * -EBUSY - completion ring does not have all the agg buffers yet
1645 * -ENOMEM - packet aborted due to out of memory
1646 * -EIO - packet aborted due to hw error indicated in BD
1647 */
e44758b7
MC
1648static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1649 u32 *raw_cons, u8 *event)
c0c050c5 1650{
e44758b7 1651 struct bnxt_napi *bnapi = cpr->bnapi;
b6ab4b01 1652 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1653 struct net_device *dev = bp->dev;
1654 struct rx_cmp *rxcmp;
1655 struct rx_cmp_ext *rxcmp1;
1656 u32 tmp_raw_cons = *raw_cons;
ee5c7fb3 1657 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
c0c050c5
MC
1658 struct bnxt_sw_rx_bd *rx_buf;
1659 unsigned int len;
6bb19474 1660 u8 *data_ptr, agg_bufs, cmp_type;
c0c050c5
MC
1661 dma_addr_t dma_addr;
1662 struct sk_buff *skb;
6bb19474 1663 void *data;
c0c050c5 1664 int rc = 0;
c61fb99c 1665 u32 misc;
c0c050c5
MC
1666
1667 rxcmp = (struct rx_cmp *)
1668 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1669
8fe88ce7
MC
1670 cmp_type = RX_CMP_TYPE(rxcmp);
1671
1672 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1673 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1674 goto next_rx_no_prod_no_len;
1675 }
1676
c0c050c5
MC
1677 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1678 cp_cons = RING_CMP(tmp_raw_cons);
1679 rxcmp1 = (struct rx_cmp_ext *)
1680 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1681
1682 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1683 return -EBUSY;
1684
c0c050c5
MC
1685 prod = rxr->rx_prod;
1686
1687 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1688 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1689 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1690
4e5dbbda 1691 *event |= BNXT_RX_EVENT;
e7e70fa6 1692 goto next_rx_no_prod_no_len;
c0c050c5
MC
1693
1694 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
e44758b7 1695 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
c0c050c5 1696 (struct rx_tpa_end_cmp *)rxcmp,
4e5dbbda 1697 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
c0c050c5 1698
1fac4b2f 1699 if (IS_ERR(skb))
c0c050c5
MC
1700 return -EBUSY;
1701
1702 rc = -ENOMEM;
1703 if (likely(skb)) {
ee5c7fb3 1704 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1705 rc = 1;
1706 }
4e5dbbda 1707 *event |= BNXT_RX_EVENT;
e7e70fa6 1708 goto next_rx_no_prod_no_len;
c0c050c5
MC
1709 }
1710
1711 cons = rxcmp->rx_cmp_opaque;
fa7e2812 1712 if (unlikely(cons != rxr->rx_next_cons)) {
e44758b7 1713 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
fa7e2812 1714
a1b0e4e6
MC
1715 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1716 cons, rxr->rx_next_cons);
fa7e2812
MC
1717 bnxt_sched_reset(bp, rxr);
1718 return rc1;
1719 }
a1b0e4e6
MC
1720 rx_buf = &rxr->rx_buf_ring[cons];
1721 data = rx_buf->data;
1722 data_ptr = rx_buf->data_ptr;
6bb19474 1723 prefetch(data_ptr);
c0c050c5 1724
c61fb99c
MC
1725 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1726 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
c0c050c5
MC
1727
1728 if (agg_bufs) {
1729 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1730 return -EBUSY;
1731
1732 cp_cons = NEXT_CMP(cp_cons);
4e5dbbda 1733 *event |= BNXT_AGG_EVENT;
c0c050c5 1734 }
4e5dbbda 1735 *event |= BNXT_RX_EVENT;
c0c050c5
MC
1736
1737 rx_buf->data = NULL;
1738 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
8e44e96c
MC
1739 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1740
c0c050c5
MC
1741 bnxt_reuse_rx_data(rxr, cons, data);
1742 if (agg_bufs)
4a228a3a
MC
1743 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1744 false);
c0c050c5
MC
1745
1746 rc = -EIO;
8e44e96c
MC
1747 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1748 netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
1749 bnxt_sched_reset(bp, rxr);
1750 }
0b397b17 1751 goto next_rx_no_len;
c0c050c5
MC
1752 }
1753
1754 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
11cd119d 1755 dma_addr = rx_buf->mapping;
c0c050c5 1756
c6d30e83
MC
1757 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1758 rc = 1;
1759 goto next_rx;
1760 }
1761
c0c050c5 1762 if (len <= bp->rx_copy_thresh) {
6bb19474 1763 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
c0c050c5
MC
1764 bnxt_reuse_rx_data(rxr, cons, data);
1765 if (!skb) {
296d5b54 1766 if (agg_bufs)
4a228a3a
MC
1767 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1768 agg_bufs, false);
c0c050c5
MC
1769 rc = -ENOMEM;
1770 goto next_rx;
1771 }
1772 } else {
c61fb99c
MC
1773 u32 payload;
1774
c6d30e83
MC
1775 if (rx_buf->data_ptr == data_ptr)
1776 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1777 else
1778 payload = 0;
6bb19474 1779 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
c61fb99c 1780 payload | len);
c0c050c5
MC
1781 if (!skb) {
1782 rc = -ENOMEM;
1783 goto next_rx;
1784 }
1785 }
1786
1787 if (agg_bufs) {
4a228a3a 1788 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
c0c050c5
MC
1789 if (!skb) {
1790 rc = -ENOMEM;
1791 goto next_rx;
1792 }
1793 }
1794
1795 if (RX_CMP_HASH_VALID(rxcmp)) {
1796 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1797 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1798
1799 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1800 if (hash_type != 1 && hash_type != 3)
1801 type = PKT_HASH_TYPE_L3;
1802 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1803 }
1804
ee5c7fb3
SP
1805 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1806 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
c0c050c5 1807
8852ddb4
MC
1808 if ((rxcmp1->rx_cmp_flags2 &
1809 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1810 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
c0c050c5 1811 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
ed7bc602 1812 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
c0c050c5
MC
1813 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1814
8852ddb4 1815 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
c0c050c5
MC
1816 }
1817
1818 skb_checksum_none_assert(skb);
1819 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1820 if (dev->features & NETIF_F_RXCSUM) {
1821 skb->ip_summed = CHECKSUM_UNNECESSARY;
1822 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1823 }
1824 } else {
665e350d
SB
1825 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1826 if (dev->features & NETIF_F_RXCSUM)
d1981929 1827 bnapi->cp_ring.rx_l4_csum_errors++;
665e350d 1828 }
c0c050c5
MC
1829 }
1830
ee5c7fb3 1831 bnxt_deliver_skb(bp, bnapi, skb);
c0c050c5
MC
1832 rc = 1;
1833
1834next_rx:
6a8788f2
AG
1835 cpr->rx_packets += 1;
1836 cpr->rx_bytes += len;
e7e70fa6 1837
0b397b17
MC
1838next_rx_no_len:
1839 rxr->rx_prod = NEXT_RX(prod);
1840 rxr->rx_next_cons = NEXT_RX(cons);
1841
e7e70fa6 1842next_rx_no_prod_no_len:
c0c050c5
MC
1843 *raw_cons = tmp_raw_cons;
1844
1845 return rc;
1846}
1847
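/* Summary of bnxt_rx_pkt() above: it consumes one RX completion (plus its
 * extension record) from the completion ring.  TPA aggregation completions
 * are only buffered via bnxt_tpa_agg(), TPA start/end completions are
 * handed to bnxt_tpa_start()/bnxt_tpa_end(), and ordinary L2 completions
 * are validated against rxr->rx_next_cons (a mismatch schedules a ring
 * reset).  Small frames (len <= bp->rx_copy_thresh) are copied into a
 * fresh skb so the original buffer can be reused; larger frames go through
 * bp->rx_skb_func and, when aggregation buffers are present,
 * bnxt_rx_pages().  RSS hash, VLAN tag and checksum status are then filled
 * in and the skb is delivered through bnxt_deliver_skb().
 *
 * Illustrative caller sketch (a simplified version of what
 * __bnxt_poll_work() further below does with the return value):
 *
 *	rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
 *	if (rc >= 0)
 *		rx_pkts += rc;		(0: TPA start, 1: packet delivered)
 *	else if (rc == -ENOMEM && budget)
 *		rx_pkts++;		(count it so NAPI cannot spin forever)
 *	else if (rc == -EBUSY)
 *		break;			(aggregation buffers not all here yet)
 */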
2270bc5d
MC
1848/* In netpoll mode, if we are using a combined completion ring, we need to
1849 * discard the rx packets and recycle the buffers.
1850 */
e44758b7
MC
1851static int bnxt_force_rx_discard(struct bnxt *bp,
1852 struct bnxt_cp_ring_info *cpr,
2270bc5d
MC
1853 u32 *raw_cons, u8 *event)
1854{
2270bc5d
MC
1855 u32 tmp_raw_cons = *raw_cons;
1856 struct rx_cmp_ext *rxcmp1;
1857 struct rx_cmp *rxcmp;
1858 u16 cp_cons;
1859 u8 cmp_type;
1860
1861 cp_cons = RING_CMP(tmp_raw_cons);
1862 rxcmp = (struct rx_cmp *)
1863 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1864
1865 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1866 cp_cons = RING_CMP(tmp_raw_cons);
1867 rxcmp1 = (struct rx_cmp_ext *)
1868 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1869
1870 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1871 return -EBUSY;
1872
1873 cmp_type = RX_CMP_TYPE(rxcmp);
1874 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1875 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1876 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1877 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1878 struct rx_tpa_end_cmp_ext *tpa_end1;
1879
1880 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1881 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1882 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1883 }
e44758b7 1884 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
2270bc5d
MC
1885}
1886
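/* Note on the forced-discard path above: it does not free anything itself.
 * It rewrites the completion so that it looks like an error
 * (RX_CMPL_ERRORS_CRC_ERROR for an ordinary L2 completion,
 * RX_TPA_END_CMP_ERRORS for a TPA end) and then lets bnxt_rx_pkt() take
 * its normal error handling, which recycles the RX buffers instead of
 * delivering an skb.
 */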
4bb13abf 1887#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
1888 ((data) & \
1889 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 1890
c0c050c5
MC
1891static int bnxt_async_event_process(struct bnxt *bp,
1892 struct hwrm_async_event_cmpl *cmpl)
1893{
1894 u16 event_id = le16_to_cpu(cmpl->event_id);
1895
1896 /* TODO CHIMP_FW: Define event id's for link change, error etc */
1897 switch (event_id) {
87c374de 1898 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
8cbde117
MC
1899 u32 data1 = le32_to_cpu(cmpl->event_data1);
1900 struct bnxt_link_info *link_info = &bp->link_info;
1901
1902 if (BNXT_VF(bp))
1903 goto async_event_process_exit;
a8168b6c
MC
1904
1905 /* print unsupported speed warning in forced speed mode only */
1906 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1907 (data1 & 0x20000)) {
8cbde117
MC
1908 u16 fw_speed = link_info->force_link_speed;
1909 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1910
a8168b6c
MC
1911 if (speed != SPEED_UNKNOWN)
1912 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1913 speed);
8cbde117 1914 }
286ef9d6 1915 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
8cbde117 1916 }
bc171e87 1917 /* fall through */
87c374de 1918 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
c0c050c5 1919 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
19241368 1920 break;
87c374de 1921 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
19241368 1922 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
c0c050c5 1923 break;
87c374de 1924 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
4bb13abf
MC
1925 u32 data1 = le32_to_cpu(cmpl->event_data1);
1926 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1927
1928 if (BNXT_VF(bp))
1929 break;
1930
1931 if (bp->pf.port_id != port_id)
1932 break;
1933
4bb13abf
MC
1934 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1935 break;
1936 }
87c374de 1937 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
fc0f1929
MC
1938 if (BNXT_PF(bp))
1939 goto async_event_process_exit;
1940 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1941 break;
c0c050c5 1942 default:
19241368 1943 goto async_event_process_exit;
c0c050c5 1944 }
c213eae8 1945 bnxt_queue_sp_work(bp);
19241368 1946async_event_process_exit:
a588e458 1947 bnxt_ulp_async_events(bp, cmpl);
c0c050c5
MC
1948 return 0;
1949}
1950
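/* The async event handler above mostly just translates firmware event IDs
 * into BNXT_*_SP_EVENT bits in bp->sp_event and kicks the slow-path task
 * via bnxt_queue_sp_work(); the actual handling happens later in that
 * task.  The link-speed-config case additionally warns, only when autoneg
 * is off, if the forced speed is no longer supported, and then falls
 * through to the generic link-status handling.  ULP drivers are always
 * notified through bnxt_ulp_async_events(), even for event IDs this
 * function does not act on itself.
 */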
1951static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1952{
1953 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1954 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1955 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1956 (struct hwrm_fwd_req_cmpl *)txcmp;
1957
1958 switch (cmpl_type) {
1959 case CMPL_BASE_TYPE_HWRM_DONE:
1960 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1961 if (seq_id == bp->hwrm_intr_seq_id)
fc718bb2 1962 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
c0c050c5
MC
1963 else
1964 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1965 break;
1966
1967 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1968 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1969
1970 if ((vf_id < bp->pf.first_vf_id) ||
1971 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1972 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1973 vf_id);
1974 return -EINVAL;
1975 }
1976
1977 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1978 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
c213eae8 1979 bnxt_queue_sp_work(bp);
c0c050c5
MC
1980 break;
1981
1982 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1983 bnxt_async_event_process(bp,
1984 (struct hwrm_async_event_cmpl *)txcmp);
1985
1986 default:
1987 break;
1988 }
1989
1990 return 0;
1991}
1992
1993static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1994{
1995 struct bnxt_napi *bnapi = dev_instance;
1996 struct bnxt *bp = bnapi->bp;
1997 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1998 u32 cons = RING_CMP(cpr->cp_raw_cons);
1999
6a8788f2 2000 cpr->event_ctr++;
c0c050c5
MC
2001 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2002 napi_schedule(&bnapi->napi);
2003 return IRQ_HANDLED;
2004}
2005
2006static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2007{
2008 u32 raw_cons = cpr->cp_raw_cons;
2009 u16 cons = RING_CMP(raw_cons);
2010 struct tx_cmp *txcmp;
2011
2012 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2013
2014 return TX_CMP_VALID(txcmp, raw_cons);
2015}
2016
c0c050c5
MC
2017static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2018{
2019 struct bnxt_napi *bnapi = dev_instance;
2020 struct bnxt *bp = bnapi->bp;
2021 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2022 u32 cons = RING_CMP(cpr->cp_raw_cons);
2023 u32 int_status;
2024
2025 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2026
2027 if (!bnxt_has_work(bp, cpr)) {
11809490 2028 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
c0c050c5
MC
2029 /* return if erroneous interrupt */
2030 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2031 return IRQ_NONE;
2032 }
2033
2034 /* disable ring IRQ */
697197e5 2035 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
c0c050c5
MC
2036
2037 /* Return here if interrupt is shared and is disabled. */
2038 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2039 return IRQ_HANDLED;
2040
2041 napi_schedule(&bnapi->napi);
2042 return IRQ_HANDLED;
2043}
2044
3675b92f
MC
2045static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2046 int budget)
c0c050c5 2047{
e44758b7 2048 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5
MC
2049 u32 raw_cons = cpr->cp_raw_cons;
2050 u32 cons;
2051 int tx_pkts = 0;
2052 int rx_pkts = 0;
4e5dbbda 2053 u8 event = 0;
c0c050c5
MC
2054 struct tx_cmp *txcmp;
2055
0fcec985 2056 cpr->has_more_work = 0;
c0c050c5
MC
2057 while (1) {
2058 int rc;
2059
2060 cons = RING_CMP(raw_cons);
2061 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2062
2063 if (!TX_CMP_VALID(txcmp, raw_cons))
2064 break;
2065
67a95e20
MC
2066 /* The valid test of the entry must be done first before
2067 * reading any further.
2068 */
b67daab0 2069 dma_rmb();
3675b92f 2070 cpr->had_work_done = 1;
c0c050c5
MC
2071 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2072 tx_pkts++;
2073 /* return full budget so NAPI will complete. */
73f21c65 2074 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
c0c050c5 2075 rx_pkts = budget;
73f21c65 2076 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985
MC
2077 if (budget)
2078 cpr->has_more_work = 1;
73f21c65
MC
2079 break;
2080 }
c0c050c5 2081 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2270bc5d 2082 if (likely(budget))
e44758b7 2083 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2270bc5d 2084 else
e44758b7 2085 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2270bc5d 2086 &event);
c0c050c5
MC
2087 if (likely(rc >= 0))
2088 rx_pkts += rc;
903649e7
MC
2089 /* Increment rx_pkts when rc is -ENOMEM to count towards
2090 * the NAPI budget. Otherwise, we may potentially loop
2091 * here forever if we consistently cannot allocate
2092 * buffers.
2093 */
2edbdb31 2094 else if (rc == -ENOMEM && budget)
903649e7 2095 rx_pkts++;
c0c050c5
MC
2096 else if (rc == -EBUSY) /* partial completion */
2097 break;
c0c050c5
MC
2098 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2099 CMPL_BASE_TYPE_HWRM_DONE) ||
2100 (TX_CMP_TYPE(txcmp) ==
2101 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2102 (TX_CMP_TYPE(txcmp) ==
2103 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2104 bnxt_hwrm_handler(bp, txcmp);
2105 }
2106 raw_cons = NEXT_RAW_CMP(raw_cons);
2107
0fcec985
MC
2108 if (rx_pkts && rx_pkts == budget) {
2109 cpr->has_more_work = 1;
c0c050c5 2110 break;
0fcec985 2111 }
c0c050c5
MC
2112 }
2113
f18c2b77
AG
2114 if (event & BNXT_REDIRECT_EVENT)
2115 xdp_do_flush_map();
2116
38413406
MC
2117 if (event & BNXT_TX_EVENT) {
2118 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
38413406
MC
2119 u16 prod = txr->tx_prod;
2120
2121 /* Sync BD data before updating doorbell */
2122 wmb();
2123
697197e5 2124 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
38413406
MC
2125 }
2126
c0c050c5 2127 cpr->cp_raw_cons = raw_cons;
3675b92f
MC
2128 bnapi->tx_pkts += tx_pkts;
2129 bnapi->events |= event;
2130 return rx_pkts;
2131}
c0c050c5 2132
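/* Budget handling in __bnxt_poll_work() above, roughly: TX completions are
 * counted separately, and once more than bp->tx_wake_thresh of them have
 * been processed the loop returns the full budget so the remaining work is
 * deferred to the next poll.  RX completions count against the NAPI
 * budget, and -ENOMEM results are counted as well (see the comment in the
 * loop) so a persistent allocation failure cannot spin here forever.
 * Doorbell writes are mostly deferred: the accumulated events and tx_pkts
 * are stashed in the bnapi and flushed in __bnxt_poll_work_done(), except
 * for the TX doorbell written here when BNXT_TX_EVENT is set (e.g. by the
 * XDP_TX path), after a wmb() so the BDs are visible to the hardware
 * first.
 */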
3675b92f
MC
2133static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2134{
2135 if (bnapi->tx_pkts) {
2136 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2137 bnapi->tx_pkts = 0;
2138 }
c0c050c5 2139
3675b92f 2140 if (bnapi->events & BNXT_RX_EVENT) {
b6ab4b01 2141 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 2142
697197e5 2143 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3675b92f 2144 if (bnapi->events & BNXT_AGG_EVENT)
697197e5 2145 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
c0c050c5 2146 }
3675b92f
MC
2147 bnapi->events = 0;
2148}
2149
2150static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2151 int budget)
2152{
2153 struct bnxt_napi *bnapi = cpr->bnapi;
2154 int rx_pkts;
2155
2156 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2157
2158 /* ACK completion ring before freeing tx ring and producing new
2159 * buffers in rx/agg rings to prevent overflowing the completion
2160 * ring.
2161 */
2162 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2163
2164 __bnxt_poll_work_done(bp, bnapi);
c0c050c5
MC
2165 return rx_pkts;
2166}
2167
10bbdaf5
PS
2168static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2169{
2170 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2171 struct bnxt *bp = bnapi->bp;
2172 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2173 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2174 struct tx_cmp *txcmp;
2175 struct rx_cmp_ext *rxcmp1;
2176 u32 cp_cons, tmp_raw_cons;
2177 u32 raw_cons = cpr->cp_raw_cons;
2178 u32 rx_pkts = 0;
4e5dbbda 2179 u8 event = 0;
10bbdaf5
PS
2180
2181 while (1) {
2182 int rc;
2183
2184 cp_cons = RING_CMP(raw_cons);
2185 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2186
2187 if (!TX_CMP_VALID(txcmp, raw_cons))
2188 break;
2189
2190 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2191 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2192 cp_cons = RING_CMP(tmp_raw_cons);
2193 rxcmp1 = (struct rx_cmp_ext *)
2194 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2195
2196 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2197 break;
2198
2199 /* force an error to recycle the buffer */
2200 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2201 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2202
e44758b7 2203 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2edbdb31 2204 if (likely(rc == -EIO) && budget)
10bbdaf5
PS
2205 rx_pkts++;
2206 else if (rc == -EBUSY) /* partial completion */
2207 break;
2208 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2209 CMPL_BASE_TYPE_HWRM_DONE)) {
2210 bnxt_hwrm_handler(bp, txcmp);
2211 } else {
2212 netdev_err(bp->dev,
2213 "Invalid completion received on special ring\n");
2214 }
2215 raw_cons = NEXT_RAW_CMP(raw_cons);
2216
2217 if (rx_pkts == budget)
2218 break;
2219 }
2220
2221 cpr->cp_raw_cons = raw_cons;
697197e5
MC
2222 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2223 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10bbdaf5 2224
434c975a 2225 if (event & BNXT_AGG_EVENT)
697197e5 2226 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10bbdaf5
PS
2227
2228 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
6ad20165 2229 napi_complete_done(napi, rx_pkts);
697197e5 2230 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
10bbdaf5
PS
2231 }
2232 return rx_pkts;
2233}
2234
c0c050c5
MC
2235static int bnxt_poll(struct napi_struct *napi, int budget)
2236{
2237 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2238 struct bnxt *bp = bnapi->bp;
2239 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2240 int work_done = 0;
2241
c0c050c5 2242 while (1) {
e44758b7 2243 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
c0c050c5 2244
73f21c65
MC
2245 if (work_done >= budget) {
2246 if (!budget)
697197e5 2247 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5 2248 break;
73f21c65 2249 }
c0c050c5
MC
2250
2251 if (!bnxt_has_work(bp, cpr)) {
e7b95691 2252 if (napi_complete_done(napi, work_done))
697197e5 2253 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5
MC
2254 break;
2255 }
2256 }
6a8788f2 2257 if (bp->flags & BNXT_FLAG_DIM) {
8960b389 2258 struct dim_sample dim_sample;
6a8788f2 2259
8960b389
TG
2260 dim_update_sample(cpr->event_ctr,
2261 cpr->rx_packets,
2262 cpr->rx_bytes,
2263 &dim_sample);
6a8788f2
AG
2264 net_dim(&cpr->dim, dim_sample);
2265 }
c0c050c5
MC
2266 return work_done;
2267}
2268
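/* bnxt_poll() above is the NAPI handler for the non-P5 completion ring
 * layout: it keeps calling bnxt_poll_work() until either the budget is
 * exhausted (re-arming only in the zero-budget netpoll case) or no work is
 * left, in which case napi_complete_done() is called and the completion
 * ring doorbell is re-armed.  When adaptive interrupt moderation is
 * enabled (BNXT_FLAG_DIM), a dim_sample built from event_ctr, rx_packets
 * and rx_bytes is fed to net_dim(), which may later adjust the ring
 * coalescing parameters.
 */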
0fcec985
MC
2269static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2270{
2271 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2272 int i, work_done = 0;
2273
2274 for (i = 0; i < 2; i++) {
2275 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2276
2277 if (cpr2) {
2278 work_done += __bnxt_poll_work(bp, cpr2,
2279 budget - work_done);
2280 cpr->has_more_work |= cpr2->has_more_work;
2281 }
2282 }
2283 return work_done;
2284}
2285
2286static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2287 u64 dbr_type, bool all)
2288{
2289 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2290 int i;
2291
2292 for (i = 0; i < 2; i++) {
2293 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2294 struct bnxt_db_info *db;
2295
2296 if (cpr2 && (all || cpr2->had_work_done)) {
2297 db = &cpr2->cp_db;
2298 writeq(db->db_key64 | dbr_type |
2299 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2300 cpr2->had_work_done = 0;
2301 }
2302 }
2303 __bnxt_poll_work_done(bp, bnapi);
2304}
2305
2306static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2307{
2308 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2309 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2310 u32 raw_cons = cpr->cp_raw_cons;
2311 struct bnxt *bp = bnapi->bp;
2312 struct nqe_cn *nqcmp;
2313 int work_done = 0;
2314 u32 cons;
2315
2316 if (cpr->has_more_work) {
2317 cpr->has_more_work = 0;
2318 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2319 if (cpr->has_more_work) {
2320 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2321 return work_done;
2322 }
2323 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2324 if (napi_complete_done(napi, work_done))
2325 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2326 return work_done;
2327 }
2328 while (1) {
2329 cons = RING_CMP(raw_cons);
2330 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2331
2332 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2333 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2334 false);
2335 cpr->cp_raw_cons = raw_cons;
2336 if (napi_complete_done(napi, work_done))
2337 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2338 cpr->cp_raw_cons);
2339 return work_done;
2340 }
2341
2342 /* The valid test of the entry must be done first before
2343 * reading any further.
2344 */
2345 dma_rmb();
2346
2347 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2348 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2349 struct bnxt_cp_ring_info *cpr2;
2350
2351 cpr2 = cpr->cp_ring_arr[idx];
2352 work_done += __bnxt_poll_work(bp, cpr2,
2353 budget - work_done);
2354 cpr->has_more_work = cpr2->has_more_work;
2355 } else {
2356 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2357 }
2358 raw_cons = NEXT_RAW_CMP(raw_cons);
2359 if (cpr->has_more_work)
2360 break;
2361 }
2362 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2363 cpr->cp_raw_cons = raw_cons;
2364 return work_done;
2365}
2366
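/* On BNXT_FLAG_CHIP_P5 devices the MSI-X vector is tied to a notification
 * queue (NQ) rather than directly to a completion ring: bnxt_poll_p5()
 * above walks the NQ, and each NQ_CN_TYPE_CQ_NOTIFICATION entry carries
 * cq_handle_low, which indexes cpr->cp_ring_arr[] (the BNXT_RX_HDL /
 * BNXT_TX_HDL sub-rings allocated in bnxt_alloc_cp_rings() below) to find
 * the completion ring that actually has work.  Those CQs are drained with
 * __bnxt_poll_work(); __bnxt_poll_cqs_done() then writes their doorbells
 * with DBR_TYPE_CQ while work remains, or DBR_TYPE_CQ_ARMALL to also
 * re-arm them, and the NQ itself is re-armed with BNXT_DB_NQ_ARM_P5()
 * once napi_complete_done() succeeds.
 */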
c0c050c5
MC
2367static void bnxt_free_tx_skbs(struct bnxt *bp)
2368{
2369 int i, max_idx;
2370 struct pci_dev *pdev = bp->pdev;
2371
b6ab4b01 2372 if (!bp->tx_ring)
c0c050c5
MC
2373 return;
2374
2375 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2376 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2377 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2378 int j;
2379
c0c050c5
MC
2380 for (j = 0; j < max_idx;) {
2381 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
f18c2b77 2382 struct sk_buff *skb;
c0c050c5
MC
2383 int k, last;
2384
f18c2b77
AG
2385 if (i < bp->tx_nr_rings_xdp &&
2386 tx_buf->action == XDP_REDIRECT) {
2387 dma_unmap_single(&pdev->dev,
2388 dma_unmap_addr(tx_buf, mapping),
2389 dma_unmap_len(tx_buf, len),
2390 PCI_DMA_TODEVICE);
2391 xdp_return_frame(tx_buf->xdpf);
2392 tx_buf->action = 0;
2393 tx_buf->xdpf = NULL;
2394 j++;
2395 continue;
2396 }
2397
2398 skb = tx_buf->skb;
c0c050c5
MC
2399 if (!skb) {
2400 j++;
2401 continue;
2402 }
2403
2404 tx_buf->skb = NULL;
2405
2406 if (tx_buf->is_push) {
2407 dev_kfree_skb(skb);
2408 j += 2;
2409 continue;
2410 }
2411
2412 dma_unmap_single(&pdev->dev,
2413 dma_unmap_addr(tx_buf, mapping),
2414 skb_headlen(skb),
2415 PCI_DMA_TODEVICE);
2416
2417 last = tx_buf->nr_frags;
2418 j += 2;
d612a579
MC
2419 for (k = 0; k < last; k++, j++) {
2420 int ring_idx = j & bp->tx_ring_mask;
c0c050c5
MC
2421 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2422
d612a579 2423 tx_buf = &txr->tx_buf_ring[ring_idx];
c0c050c5
MC
2424 dma_unmap_page(
2425 &pdev->dev,
2426 dma_unmap_addr(tx_buf, mapping),
2427 skb_frag_size(frag), PCI_DMA_TODEVICE);
2428 }
2429 dev_kfree_skb(skb);
2430 }
2431 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2432 }
2433}
2434
2435static void bnxt_free_rx_skbs(struct bnxt *bp)
2436{
2437 int i, max_idx, max_agg_idx;
2438 struct pci_dev *pdev = bp->pdev;
2439
b6ab4b01 2440 if (!bp->rx_ring)
c0c050c5
MC
2441 return;
2442
2443 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2444 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2445 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2446 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
ec4d8e7c 2447 struct bnxt_tpa_idx_map *map;
c0c050c5
MC
2448 int j;
2449
c0c050c5 2450 if (rxr->rx_tpa) {
79632e9b 2451 for (j = 0; j < bp->max_tpa; j++) {
c0c050c5
MC
2452 struct bnxt_tpa_info *tpa_info =
2453 &rxr->rx_tpa[j];
2454 u8 *data = tpa_info->data;
2455
2456 if (!data)
2457 continue;
2458
c519fe9a
SN
2459 dma_unmap_single_attrs(&pdev->dev,
2460 tpa_info->mapping,
2461 bp->rx_buf_use_size,
2462 bp->rx_dir,
2463 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
2464
2465 tpa_info->data = NULL;
2466
2467 kfree(data);
2468 }
2469 }
2470
2471 for (j = 0; j < max_idx; j++) {
2472 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
3ed3a83e 2473 dma_addr_t mapping = rx_buf->mapping;
6bb19474 2474 void *data = rx_buf->data;
c0c050c5
MC
2475
2476 if (!data)
2477 continue;
2478
c0c050c5
MC
2479 rx_buf->data = NULL;
2480
3ed3a83e
MC
2481 if (BNXT_RX_PAGE_MODE(bp)) {
2482 mapping -= bp->rx_dma_offset;
c519fe9a
SN
2483 dma_unmap_page_attrs(&pdev->dev, mapping,
2484 PAGE_SIZE, bp->rx_dir,
2485 DMA_ATTR_WEAK_ORDERING);
322b87ca 2486 page_pool_recycle_direct(rxr->page_pool, data);
3ed3a83e 2487 } else {
c519fe9a
SN
2488 dma_unmap_single_attrs(&pdev->dev, mapping,
2489 bp->rx_buf_use_size,
2490 bp->rx_dir,
2491 DMA_ATTR_WEAK_ORDERING);
c61fb99c 2492 kfree(data);
3ed3a83e 2493 }
c0c050c5
MC
2494 }
2495
2496 for (j = 0; j < max_agg_idx; j++) {
2497 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2498 &rxr->rx_agg_ring[j];
2499 struct page *page = rx_agg_buf->page;
2500
2501 if (!page)
2502 continue;
2503
c519fe9a
SN
2504 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2505 BNXT_RX_PAGE_SIZE,
2506 PCI_DMA_FROMDEVICE,
2507 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
2508
2509 rx_agg_buf->page = NULL;
2510 __clear_bit(j, rxr->rx_agg_bmap);
2511
2512 __free_page(page);
2513 }
89d0a06c
MC
2514 if (rxr->rx_page) {
2515 __free_page(rxr->rx_page);
2516 rxr->rx_page = NULL;
2517 }
ec4d8e7c
MC
2518 map = rxr->rx_tpa_idx_map;
2519 if (map)
2520 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
c0c050c5
MC
2521 }
2522}
2523
2524static void bnxt_free_skbs(struct bnxt *bp)
2525{
2526 bnxt_free_tx_skbs(bp);
2527 bnxt_free_rx_skbs(bp);
2528}
2529
6fe19886 2530static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5
MC
2531{
2532 struct pci_dev *pdev = bp->pdev;
2533 int i;
2534
6fe19886
MC
2535 for (i = 0; i < rmem->nr_pages; i++) {
2536 if (!rmem->pg_arr[i])
c0c050c5
MC
2537 continue;
2538
6fe19886
MC
2539 dma_free_coherent(&pdev->dev, rmem->page_size,
2540 rmem->pg_arr[i], rmem->dma_arr[i]);
c0c050c5 2541
6fe19886 2542 rmem->pg_arr[i] = NULL;
c0c050c5 2543 }
6fe19886 2544 if (rmem->pg_tbl) {
4f49b2b8
MC
2545 size_t pg_tbl_size = rmem->nr_pages * 8;
2546
2547 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2548 pg_tbl_size = rmem->page_size;
2549 dma_free_coherent(&pdev->dev, pg_tbl_size,
6fe19886
MC
2550 rmem->pg_tbl, rmem->pg_tbl_map);
2551 rmem->pg_tbl = NULL;
c0c050c5 2552 }
6fe19886
MC
2553 if (rmem->vmem_size && *rmem->vmem) {
2554 vfree(*rmem->vmem);
2555 *rmem->vmem = NULL;
c0c050c5
MC
2556 }
2557}
2558
6fe19886 2559static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5 2560{
c0c050c5 2561 struct pci_dev *pdev = bp->pdev;
66cca20a 2562 u64 valid_bit = 0;
6fe19886 2563 int i;
c0c050c5 2564
66cca20a
MC
2565 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2566 valid_bit = PTU_PTE_VALID;
4f49b2b8
MC
2567 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2568 size_t pg_tbl_size = rmem->nr_pages * 8;
2569
2570 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2571 pg_tbl_size = rmem->page_size;
2572 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
6fe19886 2573 &rmem->pg_tbl_map,
c0c050c5 2574 GFP_KERNEL);
6fe19886 2575 if (!rmem->pg_tbl)
c0c050c5
MC
2576 return -ENOMEM;
2577 }
2578
6fe19886 2579 for (i = 0; i < rmem->nr_pages; i++) {
66cca20a
MC
2580 u64 extra_bits = valid_bit;
2581
6fe19886
MC
2582 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2583 rmem->page_size,
2584 &rmem->dma_arr[i],
c0c050c5 2585 GFP_KERNEL);
6fe19886 2586 if (!rmem->pg_arr[i])
c0c050c5
MC
2587 return -ENOMEM;
2588
4f49b2b8 2589 if (rmem->nr_pages > 1 || rmem->depth > 0) {
66cca20a
MC
2590 if (i == rmem->nr_pages - 2 &&
2591 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2592 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2593 else if (i == rmem->nr_pages - 1 &&
2594 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2595 extra_bits |= PTU_PTE_LAST;
2596 rmem->pg_tbl[i] =
2597 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2598 }
c0c050c5
MC
2599 }
2600
6fe19886
MC
2601 if (rmem->vmem_size) {
2602 *rmem->vmem = vzalloc(rmem->vmem_size);
2603 if (!(*rmem->vmem))
c0c050c5
MC
2604 return -ENOMEM;
2605 }
2606 return 0;
2607}
2608
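/* bnxt_free_ring()/bnxt_alloc_ring() above manage one bnxt_ring_mem_info:
 * rmem->nr_pages coherent DMA pages of rmem->page_size each, an optional
 * software shadow array (*rmem->vmem, vzalloc'ed), and, when the ring
 * spans more than one page (or rmem->depth > 0), a page table whose 64-bit
 * little-endian entries point at the data pages.  When
 * BNXT_RMEM_VALID_PTE_FLAG or BNXT_RMEM_RING_PTE_FLAG is set, each entry
 * carries PTU_PTE_VALID, and for ring PTEs the last two entries
 * additionally carry PTU_PTE_NEXT_TO_LAST / PTU_PTE_LAST, e.g.:
 *
 *	rmem->pg_tbl[i] = cpu_to_le64(rmem->dma_arr[i] | PTU_PTE_VALID);
 */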
4a228a3a
MC
2609static void bnxt_free_tpa_info(struct bnxt *bp)
2610{
2611 int i;
2612
2613 for (i = 0; i < bp->rx_nr_rings; i++) {
2614 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2615
ec4d8e7c
MC
2616 kfree(rxr->rx_tpa_idx_map);
2617 rxr->rx_tpa_idx_map = NULL;
79632e9b
MC
2618 if (rxr->rx_tpa) {
2619 kfree(rxr->rx_tpa[0].agg_arr);
2620 rxr->rx_tpa[0].agg_arr = NULL;
2621 }
4a228a3a
MC
2622 kfree(rxr->rx_tpa);
2623 rxr->rx_tpa = NULL;
2624 }
2625}
2626
2627static int bnxt_alloc_tpa_info(struct bnxt *bp)
2628{
79632e9b
MC
2629 int i, j, total_aggs = 0;
2630
2631 bp->max_tpa = MAX_TPA;
2632 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2633 if (!bp->max_tpa_v2)
2634 return 0;
2635 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2636 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2637 }
4a228a3a
MC
2638
2639 for (i = 0; i < bp->rx_nr_rings; i++) {
2640 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
79632e9b 2641 struct rx_agg_cmp *agg;
4a228a3a 2642
79632e9b 2643 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
4a228a3a
MC
2644 GFP_KERNEL);
2645 if (!rxr->rx_tpa)
2646 return -ENOMEM;
79632e9b
MC
2647
2648 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2649 continue;
2650 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2651 rxr->rx_tpa[0].agg_arr = agg;
2652 if (!agg)
2653 return -ENOMEM;
2654 for (j = 1; j < bp->max_tpa; j++)
2655 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
ec4d8e7c
MC
2656 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2657 GFP_KERNEL);
2658 if (!rxr->rx_tpa_idx_map)
2659 return -ENOMEM;
4a228a3a
MC
2660 }
2661 return 0;
2662}
2663
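/* TPA bookkeeping: bnxt_alloc_tpa_info() sizes rxr->rx_tpa[] to
 * bp->max_tpa aggregation contexts per RX ring (MAX_TPA, or a larger
 * chip-specific value on P5).  On P5 chips it also allocates one flat
 * array of max_tpa * MAX_SKB_FRAGS rx_agg_cmp entries and slices it so
 * that each rx_tpa[j].agg_arr points at its own MAX_SKB_FRAGS window
 * (this is where bnxt_tpa_agg() buffers aggregation completions), plus the
 * rx_tpa_idx_map used by bnxt_lookup_agg_idx() to translate hardware
 * aggregation IDs into rx_tpa[] indices.
 */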
c0c050c5
MC
2664static void bnxt_free_rx_rings(struct bnxt *bp)
2665{
2666 int i;
2667
b6ab4b01 2668 if (!bp->rx_ring)
c0c050c5
MC
2669 return;
2670
4a228a3a 2671 bnxt_free_tpa_info(bp);
c0c050c5 2672 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2673 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2674 struct bnxt_ring_struct *ring;
2675
c6d30e83
MC
2676 if (rxr->xdp_prog)
2677 bpf_prog_put(rxr->xdp_prog);
2678
96a8604f
JDB
2679 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2680 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2681
12479f62 2682 page_pool_destroy(rxr->page_pool);
322b87ca
AG
2683 rxr->page_pool = NULL;
2684
c0c050c5
MC
2685 kfree(rxr->rx_agg_bmap);
2686 rxr->rx_agg_bmap = NULL;
2687
2688 ring = &rxr->rx_ring_struct;
6fe19886 2689 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2690
2691 ring = &rxr->rx_agg_ring_struct;
6fe19886 2692 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2693 }
2694}
2695
322b87ca
AG
2696static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2697 struct bnxt_rx_ring_info *rxr)
2698{
2699 struct page_pool_params pp = { 0 };
2700
2701 pp.pool_size = bp->rx_ring_size;
2702 pp.nid = dev_to_node(&bp->pdev->dev);
2703 pp.dev = &bp->pdev->dev;
2704 pp.dma_dir = DMA_BIDIRECTIONAL;
2705
2706 rxr->page_pool = page_pool_create(&pp);
2707 if (IS_ERR(rxr->page_pool)) {
2708 int err = PTR_ERR(rxr->page_pool);
2709
2710 rxr->page_pool = NULL;
2711 return err;
2712 }
2713 return 0;
2714}
2715
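/* One page_pool is created per RX ring, with a pool size of
 * bp->rx_ring_size and bound to the PCI device's NUMA node.
 * bnxt_alloc_rx_rings() below registers it as the MEM_TYPE_PAGE_POOL
 * memory model of the ring's xdp_rxq, and bnxt_free_rx_skbs() returns
 * pages to it with page_pool_recycle_direct() when the ring is torn down
 * in page mode.
 */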
c0c050c5
MC
2716static int bnxt_alloc_rx_rings(struct bnxt *bp)
2717{
4a228a3a 2718 int i, rc = 0, agg_rings = 0;
c0c050c5 2719
b6ab4b01
MC
2720 if (!bp->rx_ring)
2721 return -ENOMEM;
2722
c0c050c5
MC
2723 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2724 agg_rings = 1;
2725
c0c050c5 2726 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2727 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2728 struct bnxt_ring_struct *ring;
2729
c0c050c5
MC
2730 ring = &rxr->rx_ring_struct;
2731
322b87ca
AG
2732 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2733 if (rc)
2734 return rc;
2735
96a8604f 2736 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
12479f62 2737 if (rc < 0)
96a8604f
JDB
2738 return rc;
2739
f18c2b77 2740 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
322b87ca
AG
2741 MEM_TYPE_PAGE_POOL,
2742 rxr->page_pool);
f18c2b77
AG
2743 if (rc) {
2744 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2745 return rc;
2746 }
2747
6fe19886 2748 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2749 if (rc)
2750 return rc;
2751
2c61d211 2752 ring->grp_idx = i;
c0c050c5
MC
2753 if (agg_rings) {
2754 u16 mem_size;
2755
2756 ring = &rxr->rx_agg_ring_struct;
6fe19886 2757 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2758 if (rc)
2759 return rc;
2760
9899bb59 2761 ring->grp_idx = i;
c0c050c5
MC
2762 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2763 mem_size = rxr->rx_agg_bmap_size / 8;
2764 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2765 if (!rxr->rx_agg_bmap)
2766 return -ENOMEM;
c0c050c5
MC
2767 }
2768 }
4a228a3a
MC
2769 if (bp->flags & BNXT_FLAG_TPA)
2770 rc = bnxt_alloc_tpa_info(bp);
2771 return rc;
c0c050c5
MC
2772}
2773
2774static void bnxt_free_tx_rings(struct bnxt *bp)
2775{
2776 int i;
2777 struct pci_dev *pdev = bp->pdev;
2778
b6ab4b01 2779 if (!bp->tx_ring)
c0c050c5
MC
2780 return;
2781
2782 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2783 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2784 struct bnxt_ring_struct *ring;
2785
c0c050c5
MC
2786 if (txr->tx_push) {
2787 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2788 txr->tx_push, txr->tx_push_mapping);
2789 txr->tx_push = NULL;
2790 }
2791
2792 ring = &txr->tx_ring_struct;
2793
6fe19886 2794 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2795 }
2796}
2797
2798static int bnxt_alloc_tx_rings(struct bnxt *bp)
2799{
2800 int i, j, rc;
2801 struct pci_dev *pdev = bp->pdev;
2802
2803 bp->tx_push_size = 0;
2804 if (bp->tx_push_thresh) {
2805 int push_size;
2806
2807 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2808 bp->tx_push_thresh);
2809
4419dbe6 2810 if (push_size > 256) {
c0c050c5
MC
2811 push_size = 0;
2812 bp->tx_push_thresh = 0;
2813 }
2814
2815 bp->tx_push_size = push_size;
2816 }
2817
2818 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2819 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5 2820 struct bnxt_ring_struct *ring;
2e8ef77e 2821 u8 qidx;
c0c050c5 2822
c0c050c5
MC
2823 ring = &txr->tx_ring_struct;
2824
6fe19886 2825 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2826 if (rc)
2827 return rc;
2828
9899bb59 2829 ring->grp_idx = txr->bnapi->index;
c0c050c5 2830 if (bp->tx_push_size) {
c0c050c5
MC
2831 dma_addr_t mapping;
2832
2833 /* One pre-allocated DMA buffer to backup
2834 * TX push operation
2835 */
2836 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2837 bp->tx_push_size,
2838 &txr->tx_push_mapping,
2839 GFP_KERNEL);
2840
2841 if (!txr->tx_push)
2842 return -ENOMEM;
2843
c0c050c5
MC
2844 mapping = txr->tx_push_mapping +
2845 sizeof(struct tx_push_bd);
4419dbe6 2846 txr->data_mapping = cpu_to_le64(mapping);
c0c050c5 2847 }
2e8ef77e
MC
2848 qidx = bp->tc_to_qidx[j];
2849 ring->queue_id = bp->q_info[qidx].queue_id;
5f449249
MC
2850 if (i < bp->tx_nr_rings_xdp)
2851 continue;
c0c050c5
MC
2852 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2853 j++;
2854 }
2855 return 0;
2856}
2857
2858static void bnxt_free_cp_rings(struct bnxt *bp)
2859{
2860 int i;
2861
2862 if (!bp->bnapi)
2863 return;
2864
2865 for (i = 0; i < bp->cp_nr_rings; i++) {
2866 struct bnxt_napi *bnapi = bp->bnapi[i];
2867 struct bnxt_cp_ring_info *cpr;
2868 struct bnxt_ring_struct *ring;
50e3ab78 2869 int j;
c0c050c5
MC
2870
2871 if (!bnapi)
2872 continue;
2873
2874 cpr = &bnapi->cp_ring;
2875 ring = &cpr->cp_ring_struct;
2876
6fe19886 2877 bnxt_free_ring(bp, &ring->ring_mem);
50e3ab78
MC
2878
2879 for (j = 0; j < 2; j++) {
2880 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2881
2882 if (cpr2) {
2883 ring = &cpr2->cp_ring_struct;
2884 bnxt_free_ring(bp, &ring->ring_mem);
2885 kfree(cpr2);
2886 cpr->cp_ring_arr[j] = NULL;
2887 }
2888 }
c0c050c5
MC
2889 }
2890}
2891
50e3ab78
MC
2892static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2893{
2894 struct bnxt_ring_mem_info *rmem;
2895 struct bnxt_ring_struct *ring;
2896 struct bnxt_cp_ring_info *cpr;
2897 int rc;
2898
2899 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
2900 if (!cpr)
2901 return NULL;
2902
2903 ring = &cpr->cp_ring_struct;
2904 rmem = &ring->ring_mem;
2905 rmem->nr_pages = bp->cp_nr_pages;
2906 rmem->page_size = HW_CMPD_RING_SIZE;
2907 rmem->pg_arr = (void **)cpr->cp_desc_ring;
2908 rmem->dma_arr = cpr->cp_desc_mapping;
2909 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
2910 rc = bnxt_alloc_ring(bp, rmem);
2911 if (rc) {
2912 bnxt_free_ring(bp, rmem);
2913 kfree(cpr);
2914 cpr = NULL;
2915 }
2916 return cpr;
2917}
2918
c0c050c5
MC
2919static int bnxt_alloc_cp_rings(struct bnxt *bp)
2920{
50e3ab78 2921 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
e5811b8c 2922 int i, rc, ulp_base_vec, ulp_msix;
c0c050c5 2923
e5811b8c
MC
2924 ulp_msix = bnxt_get_ulp_msix_num(bp);
2925 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
c0c050c5
MC
2926 for (i = 0; i < bp->cp_nr_rings; i++) {
2927 struct bnxt_napi *bnapi = bp->bnapi[i];
2928 struct bnxt_cp_ring_info *cpr;
2929 struct bnxt_ring_struct *ring;
2930
2931 if (!bnapi)
2932 continue;
2933
2934 cpr = &bnapi->cp_ring;
50e3ab78 2935 cpr->bnapi = bnapi;
c0c050c5
MC
2936 ring = &cpr->cp_ring_struct;
2937
6fe19886 2938 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2939 if (rc)
2940 return rc;
e5811b8c
MC
2941
2942 if (ulp_msix && i >= ulp_base_vec)
2943 ring->map_idx = i + ulp_msix;
2944 else
2945 ring->map_idx = i;
50e3ab78
MC
2946
2947 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2948 continue;
2949
2950 if (i < bp->rx_nr_rings) {
2951 struct bnxt_cp_ring_info *cpr2 =
2952 bnxt_alloc_cp_sub_ring(bp);
2953
2954 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
2955 if (!cpr2)
2956 return -ENOMEM;
2957 cpr2->bnapi = bnapi;
2958 }
2959 if ((sh && i < bp->tx_nr_rings) ||
2960 (!sh && i >= bp->rx_nr_rings)) {
2961 struct bnxt_cp_ring_info *cpr2 =
2962 bnxt_alloc_cp_sub_ring(bp);
2963
2964 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
2965 if (!cpr2)
2966 return -ENOMEM;
2967 cpr2->bnapi = bnapi;
2968 }
c0c050c5
MC
2969 }
2970 return 0;
2971}
2972
2973static void bnxt_init_ring_struct(struct bnxt *bp)
2974{
2975 int i;
2976
2977 for (i = 0; i < bp->cp_nr_rings; i++) {
2978 struct bnxt_napi *bnapi = bp->bnapi[i];
6fe19886 2979 struct bnxt_ring_mem_info *rmem;
c0c050c5
MC
2980 struct bnxt_cp_ring_info *cpr;
2981 struct bnxt_rx_ring_info *rxr;
2982 struct bnxt_tx_ring_info *txr;
2983 struct bnxt_ring_struct *ring;
2984
2985 if (!bnapi)
2986 continue;
2987
2988 cpr = &bnapi->cp_ring;
2989 ring = &cpr->cp_ring_struct;
6fe19886
MC
2990 rmem = &ring->ring_mem;
2991 rmem->nr_pages = bp->cp_nr_pages;
2992 rmem->page_size = HW_CMPD_RING_SIZE;
2993 rmem->pg_arr = (void **)cpr->cp_desc_ring;
2994 rmem->dma_arr = cpr->cp_desc_mapping;
2995 rmem->vmem_size = 0;
c0c050c5 2996
b6ab4b01 2997 rxr = bnapi->rx_ring;
3b2b7d9d
MC
2998 if (!rxr)
2999 goto skip_rx;
3000
c0c050c5 3001 ring = &rxr->rx_ring_struct;
6fe19886
MC
3002 rmem = &ring->ring_mem;
3003 rmem->nr_pages = bp->rx_nr_pages;
3004 rmem->page_size = HW_RXBD_RING_SIZE;
3005 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3006 rmem->dma_arr = rxr->rx_desc_mapping;
3007 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3008 rmem->vmem = (void **)&rxr->rx_buf_ring;
c0c050c5
MC
3009
3010 ring = &rxr->rx_agg_ring_struct;
6fe19886
MC
3011 rmem = &ring->ring_mem;
3012 rmem->nr_pages = bp->rx_agg_nr_pages;
3013 rmem->page_size = HW_RXBD_RING_SIZE;
3014 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3015 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3016 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3017 rmem->vmem = (void **)&rxr->rx_agg_ring;
c0c050c5 3018
3b2b7d9d 3019skip_rx:
b6ab4b01 3020 txr = bnapi->tx_ring;
3b2b7d9d
MC
3021 if (!txr)
3022 continue;
3023
c0c050c5 3024 ring = &txr->tx_ring_struct;
6fe19886
MC
3025 rmem = &ring->ring_mem;
3026 rmem->nr_pages = bp->tx_nr_pages;
3027 rmem->page_size = HW_RXBD_RING_SIZE;
3028 rmem->pg_arr = (void **)txr->tx_desc_ring;
3029 rmem->dma_arr = txr->tx_desc_mapping;
3030 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3031 rmem->vmem = (void **)&txr->tx_buf_ring;
c0c050c5
MC
3032 }
3033}
3034
3035static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3036{
3037 int i;
3038 u32 prod;
3039 struct rx_bd **rx_buf_ring;
3040
6fe19886
MC
3041 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3042 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
c0c050c5
MC
3043 int j;
3044 struct rx_bd *rxbd;
3045
3046 rxbd = rx_buf_ring[i];
3047 if (!rxbd)
3048 continue;
3049
3050 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3051 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3052 rxbd->rx_bd_opaque = prod;
3053 }
3054 }
3055}
3056
3057static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3058{
3059 struct net_device *dev = bp->dev;
c0c050c5
MC
3060 struct bnxt_rx_ring_info *rxr;
3061 struct bnxt_ring_struct *ring;
3062 u32 prod, type;
3063 int i;
3064
c0c050c5
MC
3065 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3066 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3067
3068 if (NET_IP_ALIGN == 2)
3069 type |= RX_BD_FLAGS_SOP;
3070
b6ab4b01 3071 rxr = &bp->rx_ring[ring_nr];
c0c050c5
MC
3072 ring = &rxr->rx_ring_struct;
3073 bnxt_init_rxbd_pages(ring, type);
3074
c6d30e83
MC
3075 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3076 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
3077 if (IS_ERR(rxr->xdp_prog)) {
3078 int rc = PTR_ERR(rxr->xdp_prog);
3079
3080 rxr->xdp_prog = NULL;
3081 return rc;
3082 }
3083 }
c0c050c5
MC
3084 prod = rxr->rx_prod;
3085 for (i = 0; i < bp->rx_ring_size; i++) {
3086 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
3087 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3088 ring_nr, i, bp->rx_ring_size);
3089 break;
3090 }
3091 prod = NEXT_RX(prod);
3092 }
3093 rxr->rx_prod = prod;
3094 ring->fw_ring_id = INVALID_HW_RING_ID;
3095
edd0c2cc
MC
3096 ring = &rxr->rx_agg_ring_struct;
3097 ring->fw_ring_id = INVALID_HW_RING_ID;
3098
c0c050c5
MC
3099 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3100 return 0;
3101
2839f28b 3102 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
c0c050c5
MC
3103 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3104
3105 bnxt_init_rxbd_pages(ring, type);
3106
3107 prod = rxr->rx_agg_prod;
3108 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3109 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
3110 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3111 ring_nr, i, bp->rx_ring_size);
3112 break;
3113 }
3114 prod = NEXT_RX_AGG(prod);
3115 }
3116 rxr->rx_agg_prod = prod;
c0c050c5
MC
3117
3118 if (bp->flags & BNXT_FLAG_TPA) {
3119 if (rxr->rx_tpa) {
3120 u8 *data;
3121 dma_addr_t mapping;
3122
79632e9b 3123 for (i = 0; i < bp->max_tpa; i++) {
c0c050c5
MC
3124 data = __bnxt_alloc_rx_data(bp, &mapping,
3125 GFP_KERNEL);
3126 if (!data)
3127 return -ENOMEM;
3128
3129 rxr->rx_tpa[i].data = data;
b3dba77c 3130 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
c0c050c5
MC
3131 rxr->rx_tpa[i].mapping = mapping;
3132 }
3133 } else {
3134 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
3135 return -ENOMEM;
3136 }
3137 }
3138
3139 return 0;
3140}
3141
2247925f
SP
3142static void bnxt_init_cp_rings(struct bnxt *bp)
3143{
3e08b184 3144 int i, j;
2247925f
SP
3145
3146 for (i = 0; i < bp->cp_nr_rings; i++) {
3147 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3148 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3149
3150 ring->fw_ring_id = INVALID_HW_RING_ID;
6a8788f2
AG
3151 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3152 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3e08b184
MC
3153 for (j = 0; j < 2; j++) {
3154 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3155
3156 if (!cpr2)
3157 continue;
3158
3159 ring = &cpr2->cp_ring_struct;
3160 ring->fw_ring_id = INVALID_HW_RING_ID;
3161 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3162 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3163 }
2247925f
SP
3164 }
3165}
3166
c0c050c5
MC
3167static int bnxt_init_rx_rings(struct bnxt *bp)
3168{
3169 int i, rc = 0;
3170
c61fb99c 3171 if (BNXT_RX_PAGE_MODE(bp)) {
c6d30e83
MC
3172 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3173 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
c61fb99c
MC
3174 } else {
3175 bp->rx_offset = BNXT_RX_OFFSET;
3176 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3177 }
b3dba77c 3178
c0c050c5
MC
3179 for (i = 0; i < bp->rx_nr_rings; i++) {
3180 rc = bnxt_init_one_rx_ring(bp, i);
3181 if (rc)
3182 break;
3183 }
3184
3185 return rc;
3186}
3187
3188static int bnxt_init_tx_rings(struct bnxt *bp)
3189{
3190 u16 i;
3191
3192 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3193 MAX_SKB_FRAGS + 1);
3194
3195 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 3196 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
3197 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3198
3199 ring->fw_ring_id = INVALID_HW_RING_ID;
3200 }
3201
3202 return 0;
3203}
3204
3205static void bnxt_free_ring_grps(struct bnxt *bp)
3206{
3207 kfree(bp->grp_info);
3208 bp->grp_info = NULL;
3209}
3210
3211static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3212{
3213 int i;
3214
3215 if (irq_re_init) {
3216 bp->grp_info = kcalloc(bp->cp_nr_rings,
3217 sizeof(struct bnxt_ring_grp_info),
3218 GFP_KERNEL);
3219 if (!bp->grp_info)
3220 return -ENOMEM;
3221 }
3222 for (i = 0; i < bp->cp_nr_rings; i++) {
3223 if (irq_re_init)
3224 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3225 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3226 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3227 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3228 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3229 }
3230 return 0;
3231}
3232
3233static void bnxt_free_vnics(struct bnxt *bp)
3234{
3235 kfree(bp->vnic_info);
3236 bp->vnic_info = NULL;
3237 bp->nr_vnics = 0;
3238}
3239
3240static int bnxt_alloc_vnics(struct bnxt *bp)
3241{
3242 int num_vnics = 1;
3243
3244#ifdef CONFIG_RFS_ACCEL
9b3d15e6 3245 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
c0c050c5
MC
3246 num_vnics += bp->rx_nr_rings;
3247#endif
3248
dc52c6c7
PS
3249 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3250 num_vnics++;
3251
c0c050c5
MC
3252 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3253 GFP_KERNEL);
3254 if (!bp->vnic_info)
3255 return -ENOMEM;
3256
3257 bp->nr_vnics = num_vnics;
3258 return 0;
3259}
3260
3261static void bnxt_init_vnics(struct bnxt *bp)
3262{
3263 int i;
3264
3265 for (i = 0; i < bp->nr_vnics; i++) {
3266 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
44c6f72a 3267 int j;
c0c050c5
MC
3268
3269 vnic->fw_vnic_id = INVALID_HW_RING_ID;
44c6f72a
MC
3270 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3271 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3272
c0c050c5
MC
3273 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3274
3275 if (bp->vnic_info[i].rss_hash_key) {
3276 if (i == 0)
3277 prandom_bytes(vnic->rss_hash_key,
3278 HW_HASH_KEY_SIZE);
3279 else
3280 memcpy(vnic->rss_hash_key,
3281 bp->vnic_info[0].rss_hash_key,
3282 HW_HASH_KEY_SIZE);
3283 }
3284 }
3285}
3286
3287static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3288{
3289 int pages;
3290
3291 pages = ring_size / desc_per_pg;
3292
3293 if (!pages)
3294 return 1;
3295
3296 pages++;
3297
3298 while (pages & (pages - 1))
3299 pages++;
3300
3301 return pages;
3302}
3303
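/* bnxt_calc_nr_ring_pages() returns the smallest power of two strictly
 * greater than ring_size / desc_per_pg (minimum 1), for example:
 *
 *	ring_size / desc_per_pg == 0  ->  1 page
 *	ring_size / desc_per_pg == 3  ->  4 pages
 *	ring_size / desc_per_pg == 4  ->  8 pages
 *
 * The resulting page count therefore always exceeds the exact quotient and
 * is a power of two, which is what lets bnxt_set_ring_params() below build
 * ring masks of the form (nr_pages * DESC_CNT) - 1.
 */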
c6d30e83 3304void bnxt_set_tpa_flags(struct bnxt *bp)
c0c050c5
MC
3305{
3306 bp->flags &= ~BNXT_FLAG_TPA;
341138c3
MC
3307 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3308 return;
c0c050c5
MC
3309 if (bp->dev->features & NETIF_F_LRO)
3310 bp->flags |= BNXT_FLAG_LRO;
1054aee8 3311 else if (bp->dev->features & NETIF_F_GRO_HW)
c0c050c5
MC
3312 bp->flags |= BNXT_FLAG_GRO;
3313}
3314
3315/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3316 * be set on entry.
3317 */
3318void bnxt_set_ring_params(struct bnxt *bp)
3319{
3320 u32 ring_size, rx_size, rx_space;
3321 u32 agg_factor = 0, agg_ring_size = 0;
3322
3323 /* 8 for CRC and VLAN */
3324 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3325
3326 rx_space = rx_size + NET_SKB_PAD +
3327 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3328
3329 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3330 ring_size = bp->rx_ring_size;
3331 bp->rx_agg_ring_size = 0;
3332 bp->rx_agg_nr_pages = 0;
3333
3334 if (bp->flags & BNXT_FLAG_TPA)
2839f28b 3335 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
c0c050c5
MC
3336
3337 bp->flags &= ~BNXT_FLAG_JUMBO;
bdbd1eb5 3338 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
c0c050c5
MC
3339 u32 jumbo_factor;
3340
3341 bp->flags |= BNXT_FLAG_JUMBO;
3342 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3343 if (jumbo_factor > agg_factor)
3344 agg_factor = jumbo_factor;
3345 }
3346 agg_ring_size = ring_size * agg_factor;
3347
3348 if (agg_ring_size) {
3349 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3350 RX_DESC_CNT);
3351 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3352 u32 tmp = agg_ring_size;
3353
3354 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3355 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3356 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3357 tmp, agg_ring_size);
3358 }
3359 bp->rx_agg_ring_size = agg_ring_size;
3360 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3361 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3362 rx_space = rx_size + NET_SKB_PAD +
3363 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3364 }
3365
3366 bp->rx_buf_use_size = rx_size;
3367 bp->rx_buf_size = rx_space;
3368
3369 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3370 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3371
3372 ring_size = bp->tx_ring_size;
3373 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3374 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3375
3376 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3377 bp->cp_ring_size = ring_size;
3378
3379 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3380 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3381 bp->cp_nr_pages = MAX_CP_PAGES;
3382 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3383 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3384 ring_size, bp->cp_ring_size);
3385 }
3386 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3387 bp->cp_ring_mask = bp->cp_bit - 1;
3388}
3389
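/* Rough sizing picture for bnxt_set_ring_params() above: the RX buffer
 * size is derived from the MTU (plus the ethernet header, NET_IP_ALIGN and
 * 8 bytes for CRC/VLAN) and padded with NET_SKB_PAD plus the
 * skb_shared_info overhead.  With TPA the aggregation ring is agg_factor
 * times the RX ring, where agg_factor = min(4, 65536 / BNXT_RX_PAGE_SIZE)
 * (4 with the illustrative assumption of a 4K BNXT_RX_PAGE_SIZE), and once
 * aggregation rings are in use the main RX buffer shrinks back to a
 * BNXT_RX_COPY_THRESH based size because large payloads land in agg pages.
 * The completion ring is sized as rx_ring_size * (2 + agg_factor) +
 * tx_ring_size, capped at MAX_CP_PAGES, and every ring mask is
 * (nr_pages * DESC_CNT) - 1, which relies on bnxt_calc_nr_ring_pages()
 * returning powers of two.
 */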
96a8604f
JDB
3390/* Changing allocation mode of RX rings.
3391 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3392 */
c61fb99c 3393int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
6bb19474 3394{
c61fb99c
MC
3395 if (page_mode) {
3396 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3397 return -EOPNOTSUPP;
7eb9bb3a
MC
3398 bp->dev->max_mtu =
3399 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
c61fb99c
MC
3400 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3401 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
c61fb99c
MC
3402 bp->rx_dir = DMA_BIDIRECTIONAL;
3403 bp->rx_skb_func = bnxt_rx_page_skb;
1054aee8
MC
3404 /* Disable LRO or GRO_HW */
3405 netdev_update_features(bp->dev);
c61fb99c 3406 } else {
7eb9bb3a 3407 bp->dev->max_mtu = bp->max_mtu;
c61fb99c
MC
3408 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3409 bp->rx_dir = DMA_FROM_DEVICE;
3410 bp->rx_skb_func = bnxt_rx_skb;
3411 }
6bb19474
MC
3412 return 0;
3413}
3414
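/* bnxt_set_rx_skb_mode() switches between the two RX buffer strategies:
 * in page mode (used when an XDP program is attached) the MTU is capped at
 * BNXT_MAX_PAGE_MODE_MTU, aggregation rings are disabled, buffers are
 * mapped DMA_BIDIRECTIONAL so the XDP program may rewrite them, and
 * bnxt_rx_page_skb() builds the skb; LRO/GRO_HW are turned off via
 * netdev_update_features().  In the normal mode buffers are mapped
 * DMA_FROM_DEVICE and bnxt_rx_skb() is used.
 */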
c0c050c5
MC
3415static void bnxt_free_vnic_attributes(struct bnxt *bp)
3416{
3417 int i;
3418 struct bnxt_vnic_info *vnic;
3419 struct pci_dev *pdev = bp->pdev;
3420
3421 if (!bp->vnic_info)
3422 return;
3423
3424 for (i = 0; i < bp->nr_vnics; i++) {
3425 vnic = &bp->vnic_info[i];
3426
3427 kfree(vnic->fw_grp_ids);
3428 vnic->fw_grp_ids = NULL;
3429
3430 kfree(vnic->uc_list);
3431 vnic->uc_list = NULL;
3432
3433 if (vnic->mc_list) {
3434 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3435 vnic->mc_list, vnic->mc_list_mapping);
3436 vnic->mc_list = NULL;
3437 }
3438
3439 if (vnic->rss_table) {
3440 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3441 vnic->rss_table,
3442 vnic->rss_table_dma_addr);
3443 vnic->rss_table = NULL;
3444 }
3445
3446 vnic->rss_hash_key = NULL;
3447 vnic->flags = 0;
3448 }
3449}
3450
3451static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3452{
3453 int i, rc = 0, size;
3454 struct bnxt_vnic_info *vnic;
3455 struct pci_dev *pdev = bp->pdev;
3456 int max_rings;
3457
3458 for (i = 0; i < bp->nr_vnics; i++) {
3459 vnic = &bp->vnic_info[i];
3460
3461 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3462 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3463
3464 if (mem_size > 0) {
3465 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3466 if (!vnic->uc_list) {
3467 rc = -ENOMEM;
3468 goto out;
3469 }
3470 }
3471 }
3472
3473 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3474 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3475 vnic->mc_list =
3476 dma_alloc_coherent(&pdev->dev,
3477 vnic->mc_list_size,
3478 &vnic->mc_list_mapping,
3479 GFP_KERNEL);
3480 if (!vnic->mc_list) {
3481 rc = -ENOMEM;
3482 goto out;
3483 }
3484 }
3485
44c6f72a
MC
3486 if (bp->flags & BNXT_FLAG_CHIP_P5)
3487 goto vnic_skip_grps;
3488
c0c050c5
MC
3489 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3490 max_rings = bp->rx_nr_rings;
3491 else
3492 max_rings = 1;
3493
3494 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3495 if (!vnic->fw_grp_ids) {
3496 rc = -ENOMEM;
3497 goto out;
3498 }
44c6f72a 3499vnic_skip_grps:
ae10ae74
MC
3500 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3501 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3502 continue;
3503
c0c050c5
MC
3504 /* Allocate rss table and hash key */
3505 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3506 &vnic->rss_table_dma_addr,
3507 GFP_KERNEL);
3508 if (!vnic->rss_table) {
3509 rc = -ENOMEM;
3510 goto out;
3511 }
3512
3513 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3514
3515 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3516 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3517 }
3518 return 0;
3519
3520out:
3521 return rc;
3522}
3523
3524static void bnxt_free_hwrm_resources(struct bnxt *bp)
3525{
3526 struct pci_dev *pdev = bp->pdev;
3527
a2bf74f4
VD
3528 if (bp->hwrm_cmd_resp_addr) {
3529 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3530 bp->hwrm_cmd_resp_dma_addr);
3531 bp->hwrm_cmd_resp_addr = NULL;
3532 }
760b6d33
VD
3533
3534 if (bp->hwrm_cmd_kong_resp_addr) {
3535 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3536 bp->hwrm_cmd_kong_resp_addr,
3537 bp->hwrm_cmd_kong_resp_dma_addr);
3538 bp->hwrm_cmd_kong_resp_addr = NULL;
3539 }
3540}
3541
3542static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3543{
3544 struct pci_dev *pdev = bp->pdev;
3545
3546 bp->hwrm_cmd_kong_resp_addr =
3547 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3548 &bp->hwrm_cmd_kong_resp_dma_addr,
3549 GFP_KERNEL);
3550 if (!bp->hwrm_cmd_kong_resp_addr)
3551 return -ENOMEM;
3552
3553 return 0;
c0c050c5
MC
3554}
3555
3556static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3557{
3558 struct pci_dev *pdev = bp->pdev;
3559
3560 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3561 &bp->hwrm_cmd_resp_dma_addr,
3562 GFP_KERNEL);
3563 if (!bp->hwrm_cmd_resp_addr)
3564 return -ENOMEM;
c0c050c5
MC
3565
3566 return 0;
3567}
3568
e605db80
DK
3569static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3570{
3571 if (bp->hwrm_short_cmd_req_addr) {
3572 struct pci_dev *pdev = bp->pdev;
3573
1dfddc41 3574 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
e605db80
DK
3575 bp->hwrm_short_cmd_req_addr,
3576 bp->hwrm_short_cmd_req_dma_addr);
3577 bp->hwrm_short_cmd_req_addr = NULL;
3578 }
3579}
3580
3581static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3582{
3583 struct pci_dev *pdev = bp->pdev;
3584
3585 bp->hwrm_short_cmd_req_addr =
1dfddc41 3586 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
e605db80
DK
3587 &bp->hwrm_short_cmd_req_dma_addr,
3588 GFP_KERNEL);
3589 if (!bp->hwrm_short_cmd_req_addr)
3590 return -ENOMEM;
3591
3592 return 0;
3593}
3594
fd3ab1c7 3595static void bnxt_free_port_stats(struct bnxt *bp)
c0c050c5 3596{
c0c050c5
MC
3597 struct pci_dev *pdev = bp->pdev;
3598
00db3cba
VV
3599 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3600 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3601
3bdf56c4
MC
3602 if (bp->hw_rx_port_stats) {
3603 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3604 bp->hw_rx_port_stats,
3605 bp->hw_rx_port_stats_map);
3606 bp->hw_rx_port_stats = NULL;
00db3cba
VV
3607 }
3608
36e53349
MC
3609 if (bp->hw_tx_port_stats_ext) {
3610 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3611 bp->hw_tx_port_stats_ext,
3612 bp->hw_tx_port_stats_ext_map);
3613 bp->hw_tx_port_stats_ext = NULL;
3614 }
3615
00db3cba
VV
3616 if (bp->hw_rx_port_stats_ext) {
3617 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3618 bp->hw_rx_port_stats_ext,
3619 bp->hw_rx_port_stats_ext_map);
3620 bp->hw_rx_port_stats_ext = NULL;
3bdf56c4 3621 }
55e4398d
VV
3622
3623 if (bp->hw_pcie_stats) {
3624 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3625 bp->hw_pcie_stats, bp->hw_pcie_stats_map);
3626 bp->hw_pcie_stats = NULL;
3627 }
fd3ab1c7
MC
3628}
3629
3630static void bnxt_free_ring_stats(struct bnxt *bp)
3631{
3632 struct pci_dev *pdev = bp->pdev;
3633 int size, i;
3bdf56c4 3634
c0c050c5
MC
3635 if (!bp->bnapi)
3636 return;
3637
3638 size = sizeof(struct ctx_hw_stats);
3639
3640 for (i = 0; i < bp->cp_nr_rings; i++) {
3641 struct bnxt_napi *bnapi = bp->bnapi[i];
3642 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3643
3644 if (cpr->hw_stats) {
3645 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3646 cpr->hw_stats_map);
3647 cpr->hw_stats = NULL;
3648 }
3649 }
3650}
3651
3652static int bnxt_alloc_stats(struct bnxt *bp)
3653{
3654 u32 size, i;
3655 struct pci_dev *pdev = bp->pdev;
3656
3657 size = sizeof(struct ctx_hw_stats);
3658
3659 for (i = 0; i < bp->cp_nr_rings; i++) {
3660 struct bnxt_napi *bnapi = bp->bnapi[i];
3661 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3662
3663 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3664 &cpr->hw_stats_map,
3665 GFP_KERNEL);
3666 if (!cpr->hw_stats)
3667 return -ENOMEM;
3668
3669 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3670 }
3bdf56c4 3671
a220eabc
VV
3672 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
3673 return 0;
fd3ab1c7 3674
a220eabc
VV
3675 if (bp->hw_rx_port_stats)
3676 goto alloc_ext_stats;
3bdf56c4 3677
a220eabc
VV
3678 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3679 sizeof(struct tx_port_stats) + 1024;
3bdf56c4 3680
a220eabc
VV
3681 bp->hw_rx_port_stats =
3682 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3683 &bp->hw_rx_port_stats_map,
3684 GFP_KERNEL);
3685 if (!bp->hw_rx_port_stats)
3686 return -ENOMEM;
3bdf56c4 3687
a220eabc
VV
3688 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
3689 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3690 sizeof(struct rx_port_stats) + 512;
3691 bp->flags |= BNXT_FLAG_PORT_STATS;
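 /* Layout of the single DMA buffer allocated above: rx_port_stats,
  * a 512-byte pad, tx_port_stats, then another 512-byte pad (hence
  * the extra 1024 bytes in hw_port_stats_size and the +512 offsets
  * used to derive the TX pointer and mapping from the RX base).
  */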
00db3cba 3692
fd3ab1c7 3693alloc_ext_stats:
a220eabc
VV
3694 /* Display extended statistics only if FW supports it */
3695 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
6154532f 3696 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
00db3cba
VV
3697 return 0;
3698
a220eabc
VV
3699 if (bp->hw_rx_port_stats_ext)
3700 goto alloc_tx_ext_stats;
fd3ab1c7 3701
a220eabc
VV
3702 bp->hw_rx_port_stats_ext =
3703 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3704 &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
3705 if (!bp->hw_rx_port_stats_ext)
3706 return 0;
00db3cba 3707
fd3ab1c7 3708alloc_tx_ext_stats:
a220eabc 3709 if (bp->hw_tx_port_stats_ext)
55e4398d 3710 goto alloc_pcie_stats;
fd3ab1c7 3711
6154532f
VV
3712 if (bp->hwrm_spec_code >= 0x10902 ||
3713 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
a220eabc
VV
3714 bp->hw_tx_port_stats_ext =
3715 dma_alloc_coherent(&pdev->dev,
3716 sizeof(struct tx_port_stats_ext),
3717 &bp->hw_tx_port_stats_ext_map,
3718 GFP_KERNEL);
3bdf56c4 3719 }
a220eabc 3720 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
55e4398d
VV
3721
3722alloc_pcie_stats:
3723 if (bp->hw_pcie_stats ||
3724 !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
3725 return 0;
3726
3727 bp->hw_pcie_stats =
3728 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3729 &bp->hw_pcie_stats_map, GFP_KERNEL);
3730 if (!bp->hw_pcie_stats)
3731 return 0;
3732
3733 bp->flags |= BNXT_FLAG_PCIE_STATS;
c0c050c5
MC
3734 return 0;
3735}
3736
3737static void bnxt_clear_ring_indices(struct bnxt *bp)
3738{
3739 int i;
3740
3741 if (!bp->bnapi)
3742 return;
3743
3744 for (i = 0; i < bp->cp_nr_rings; i++) {
3745 struct bnxt_napi *bnapi = bp->bnapi[i];
3746 struct bnxt_cp_ring_info *cpr;
3747 struct bnxt_rx_ring_info *rxr;
3748 struct bnxt_tx_ring_info *txr;
3749
3750 if (!bnapi)
3751 continue;
3752
3753 cpr = &bnapi->cp_ring;
3754 cpr->cp_raw_cons = 0;
3755
b6ab4b01 3756 txr = bnapi->tx_ring;
3b2b7d9d
MC
3757 if (txr) {
3758 txr->tx_prod = 0;
3759 txr->tx_cons = 0;
3760 }
c0c050c5 3761
b6ab4b01 3762 rxr = bnapi->rx_ring;
3b2b7d9d
MC
3763 if (rxr) {
3764 rxr->rx_prod = 0;
3765 rxr->rx_agg_prod = 0;
3766 rxr->rx_sw_agg_prod = 0;
376a5b86 3767 rxr->rx_next_cons = 0;
3b2b7d9d 3768 }
c0c050c5
MC
3769 }
3770}
3771
3772static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3773{
3774#ifdef CONFIG_RFS_ACCEL
3775 int i;
3776
3777 /* We are under rtnl_lock and all our NAPIs have been disabled, so
3778 * it is safe to delete the hash table.
3779 */
3780 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3781 struct hlist_head *head;
3782 struct hlist_node *tmp;
3783 struct bnxt_ntuple_filter *fltr;
3784
3785 head = &bp->ntp_fltr_hash_tbl[i];
3786 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3787 hlist_del(&fltr->hash);
3788 kfree(fltr);
3789 }
3790 }
3791 if (irq_reinit) {
3792 kfree(bp->ntp_fltr_bmap);
3793 bp->ntp_fltr_bmap = NULL;
3794 }
3795 bp->ntp_fltr_count = 0;
3796#endif
3797}
3798
3799static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3800{
3801#ifdef CONFIG_RFS_ACCEL
3802 int i, rc = 0;
3803
3804 if (!(bp->flags & BNXT_FLAG_RFS))
3805 return 0;
3806
3807 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3808 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3809
3810 bp->ntp_fltr_count = 0;
ac45bd93
DC
3811 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3812 sizeof(long),
c0c050c5
MC
3813 GFP_KERNEL);
3814
3815 if (!bp->ntp_fltr_bmap)
3816 rc = -ENOMEM;
3817
3818 return rc;
3819#else
3820 return 0;
3821#endif
3822}
3823
3824static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3825{
3826 bnxt_free_vnic_attributes(bp);
3827 bnxt_free_tx_rings(bp);
3828 bnxt_free_rx_rings(bp);
3829 bnxt_free_cp_rings(bp);
3830 bnxt_free_ntp_fltrs(bp, irq_re_init);
3831 if (irq_re_init) {
fd3ab1c7 3832 bnxt_free_ring_stats(bp);
c0c050c5
MC
3833 bnxt_free_ring_grps(bp);
3834 bnxt_free_vnics(bp);
a960dec9
MC
3835 kfree(bp->tx_ring_map);
3836 bp->tx_ring_map = NULL;
b6ab4b01
MC
3837 kfree(bp->tx_ring);
3838 bp->tx_ring = NULL;
3839 kfree(bp->rx_ring);
3840 bp->rx_ring = NULL;
c0c050c5
MC
3841 kfree(bp->bnapi);
3842 bp->bnapi = NULL;
3843 } else {
3844 bnxt_clear_ring_indices(bp);
3845 }
3846}
3847
3848static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3849{
01657bcd 3850 int i, j, rc, size, arr_size;
c0c050c5
MC
3851 void *bnapi;
3852
3853 if (irq_re_init) {
3854 /* Allocate bnapi mem pointer array and mem block for
3855 * all queues
3856 */
3857 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3858 bp->cp_nr_rings);
3859 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3860 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3861 if (!bnapi)
3862 return -ENOMEM;
3863
3864 bp->bnapi = bnapi;
3865 bnapi += arr_size;
3866 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3867 bp->bnapi[i] = bnapi;
3868 bp->bnapi[i]->index = i;
3869 bp->bnapi[i]->bp = bp;
e38287b7
MC
3870 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3871 struct bnxt_cp_ring_info *cpr =
3872 &bp->bnapi[i]->cp_ring;
3873
3874 cpr->cp_ring_struct.ring_mem.flags =
3875 BNXT_RMEM_RING_PTE_FLAG;
3876 }
c0c050c5
MC
3877 }
3878
b6ab4b01
MC
3879 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3880 sizeof(struct bnxt_rx_ring_info),
3881 GFP_KERNEL);
3882 if (!bp->rx_ring)
3883 return -ENOMEM;
3884
3885 for (i = 0; i < bp->rx_nr_rings; i++) {
e38287b7
MC
3886 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3887
3888 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3889 rxr->rx_ring_struct.ring_mem.flags =
3890 BNXT_RMEM_RING_PTE_FLAG;
3891 rxr->rx_agg_ring_struct.ring_mem.flags =
3892 BNXT_RMEM_RING_PTE_FLAG;
3893 }
3894 rxr->bnapi = bp->bnapi[i];
b6ab4b01
MC
3895 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3896 }
3897
3898 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3899 sizeof(struct bnxt_tx_ring_info),
3900 GFP_KERNEL);
3901 if (!bp->tx_ring)
3902 return -ENOMEM;
3903
a960dec9
MC
3904 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3905 GFP_KERNEL);
3906
3907 if (!bp->tx_ring_map)
3908 return -ENOMEM;
3909
01657bcd
MC
3910 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3911 j = 0;
3912 else
3913 j = bp->rx_nr_rings;
3914
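 /* With shared completion rings, TX ring i reuses the NAPI of RX ring i
  * (j starts at 0); otherwise the TX NAPIs start right after the last
  * RX NAPI (j starts at rx_nr_rings).
  */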
3915 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
e38287b7
MC
3916 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3917
3918 if (bp->flags & BNXT_FLAG_CHIP_P5)
3919 txr->tx_ring_struct.ring_mem.flags =
3920 BNXT_RMEM_RING_PTE_FLAG;
3921 txr->bnapi = bp->bnapi[j];
3922 bp->bnapi[j]->tx_ring = txr;
5f449249 3923 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
38413406 3924 if (i >= bp->tx_nr_rings_xdp) {
e38287b7 3925 txr->txq_index = i - bp->tx_nr_rings_xdp;
38413406
MC
3926 bp->bnapi[j]->tx_int = bnxt_tx_int;
3927 } else {
fa3e93e8 3928 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
38413406
MC
3929 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3930 }
b6ab4b01
MC
3931 }
3932
c0c050c5
MC
3933 rc = bnxt_alloc_stats(bp);
3934 if (rc)
3935 goto alloc_mem_err;
3936
3937 rc = bnxt_alloc_ntp_fltrs(bp);
3938 if (rc)
3939 goto alloc_mem_err;
3940
3941 rc = bnxt_alloc_vnics(bp);
3942 if (rc)
3943 goto alloc_mem_err;
3944 }
3945
3946 bnxt_init_ring_struct(bp);
3947
3948 rc = bnxt_alloc_rx_rings(bp);
3949 if (rc)
3950 goto alloc_mem_err;
3951
3952 rc = bnxt_alloc_tx_rings(bp);
3953 if (rc)
3954 goto alloc_mem_err;
3955
3956 rc = bnxt_alloc_cp_rings(bp);
3957 if (rc)
3958 goto alloc_mem_err;
3959
3960 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3961 BNXT_VNIC_UCAST_FLAG;
3962 rc = bnxt_alloc_vnic_attributes(bp);
3963 if (rc)
3964 goto alloc_mem_err;
3965 return 0;
3966
3967alloc_mem_err:
3968 bnxt_free_mem(bp, true);
3969 return rc;
3970}
3971
9d8bc097
MC
3972static void bnxt_disable_int(struct bnxt *bp)
3973{
3974 int i;
3975
3976 if (!bp->bnapi)
3977 return;
3978
3979 for (i = 0; i < bp->cp_nr_rings; i++) {
3980 struct bnxt_napi *bnapi = bp->bnapi[i];
3981 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
daf1f1e7 3982 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9d8bc097 3983
daf1f1e7 3984 if (ring->fw_ring_id != INVALID_HW_RING_ID)
697197e5 3985 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
3986 }
3987}
3988
e5811b8c
MC
3989static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
3990{
3991 struct bnxt_napi *bnapi = bp->bnapi[n];
3992 struct bnxt_cp_ring_info *cpr;
3993
3994 cpr = &bnapi->cp_ring;
3995 return cpr->cp_ring_struct.map_idx;
3996}
3997
9d8bc097
MC
3998static void bnxt_disable_int_sync(struct bnxt *bp)
3999{
4000 int i;
4001
4002 atomic_inc(&bp->intr_sem);
4003
4004 bnxt_disable_int(bp);
e5811b8c
MC
4005 for (i = 0; i < bp->cp_nr_rings; i++) {
4006 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4007
4008 synchronize_irq(bp->irq_tbl[map_idx].vector);
4009 }
9d8bc097
MC
4010}
4011
4012static void bnxt_enable_int(struct bnxt *bp)
4013{
4014 int i;
4015
4016 atomic_set(&bp->intr_sem, 0);
4017 for (i = 0; i < bp->cp_nr_rings; i++) {
4018 struct bnxt_napi *bnapi = bp->bnapi[i];
4019 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4020
697197e5 4021 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
4022 }
4023}
4024
c0c050c5
MC
4025void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4026 u16 cmpl_ring, u16 target_id)
4027{
a8643e16 4028 struct input *req = request;
c0c050c5 4029
a8643e16
MC
4030 req->req_type = cpu_to_le16(req_type);
4031 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4032 req->target_id = cpu_to_le16(target_id);
760b6d33
VD
4033 if (bnxt_kong_hwrm_message(bp, req))
4034 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4035 else
4036 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
c0c050c5
MC
4037}
4038
fbfbc485
MC
4039static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4040 int timeout, bool silent)
c0c050c5 4041{
a11fa2be 4042 int i, intr_process, rc, tmo_count;
a8643e16 4043 struct input *req = msg;
c0c050c5 4044 u32 *data = msg;
845adfe4
MC
4045 __le32 *resp_len;
4046 u8 *valid;
c0c050c5
MC
4047 u16 cp_ring_id, len = 0;
4048 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
e605db80 4049 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
ebd5818c 4050 struct hwrm_short_input short_input = {0};
2e9ee398 4051 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
89455017 4052 u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
2e9ee398 4053 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
760b6d33 4054 u16 dst = BNXT_HWRM_CHNL_CHIMP;
c0c050c5 4055
1dfddc41
MC
4056 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4057 if (msg_len > bp->hwrm_max_ext_req_len ||
4058 !bp->hwrm_short_cmd_req_addr)
4059 return -EINVAL;
4060 }
4061
760b6d33
VD
4062 if (bnxt_hwrm_kong_chnl(bp, req)) {
4063 dst = BNXT_HWRM_CHNL_KONG;
4064 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4065 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4066 resp = bp->hwrm_cmd_kong_resp_addr;
4067 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
4068 }
4069
4070 memset(resp, 0, PAGE_SIZE);
4071 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4072 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4073
4074 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4075 /* currently supports only one outstanding message */
4076 if (intr_process)
4077 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4078
1dfddc41
MC
4079 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4080 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
e605db80 4081 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
1dfddc41
MC
4082 u16 max_msg_len;
4083
4084 /* Set the boundary for the maximum extended request length for the
4085 * short cmd format. If passed up from the device, use the maximum
4086 * supported internal request length.
4087 */
4088 max_msg_len = bp->hwrm_max_ext_req_len;
e605db80
DK
4089
4090 memcpy(short_cmd_req, req, msg_len);
1dfddc41
MC
4091 if (msg_len < max_msg_len)
4092 memset(short_cmd_req + msg_len, 0,
4093 max_msg_len - msg_len);
e605db80
DK
4094
4095 short_input.req_type = req->req_type;
4096 short_input.signature =
4097 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4098 short_input.size = cpu_to_le16(msg_len);
4099 short_input.req_addr =
4100 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4101
4102 data = (u32 *)&short_input;
4103 msg_len = sizeof(short_input);
4104
4105 /* Sync memory write before updating doorbell */
4106 wmb();
4107
4108 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4109 }
4110
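 /* When the short command format is used above, only the small
  * short_input structure (request type, signature, size and the DMA
  * address of the full request) is written to the channel below;
  * firmware then fetches the full request from host memory.
  */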
c0c050c5 4111 /* Write request msg to hwrm channel */
2e9ee398 4112 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
c0c050c5 4113
e605db80 4114 for (i = msg_len; i < max_req_len; i += 4)
2e9ee398 4115 writel(0, bp->bar0 + bar_offset + i);
d79979a1 4116
c0c050c5 4117 /* Ring channel doorbell */
2e9ee398 4118 writel(1, bp->bar0 + doorbell_offset);
c0c050c5 4119
ff4fe81d
MC
4120 if (!timeout)
4121 timeout = DFLT_HWRM_CMD_TIMEOUT;
9751e8e7
AG
4122 /* convert timeout to usec */
4123 timeout *= 1000;
ff4fe81d 4124
c0c050c5 4125 i = 0;
9751e8e7
AG
4126 /* Short timeout for the first few iterations:
4127 * number of loops = number of loops for short timeout +
4128 * number of loops for standard timeout.
4129 */
4130 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4131 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4132 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
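 /* The loop budget is HWRM_SHORT_TIMEOUT_COUNTER short sleeps plus
  * enough HWRM_MIN_TIMEOUT-length sleeps to cover whatever remains of
  * the requested timeout, so the polling loops below span the full
  * timeout in wall-clock time.
  */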
89455017
VD
4133 resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
4134
c0c050c5 4135 if (intr_process) {
fc718bb2
VD
4136 u16 seq_id = bp->hwrm_intr_seq_id;
4137
c0c050c5 4138 /* Wait until hwrm response cmpl interrupt is processed */
fc718bb2 4139 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
a11fa2be 4140 i++ < tmo_count) {
9751e8e7
AG
4141 /* on first few passes, just barely sleep */
4142 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4143 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4144 HWRM_SHORT_MAX_TIMEOUT);
4145 else
4146 usleep_range(HWRM_MIN_TIMEOUT,
4147 HWRM_MAX_TIMEOUT);
c0c050c5
MC
4148 }
4149
fc718bb2 4150 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
c0c050c5 4151 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
a8643e16 4152 le16_to_cpu(req->req_type));
c0c050c5
MC
4153 return -1;
4154 }
845adfe4
MC
4155 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4156 HWRM_RESP_LEN_SFT;
89455017 4157 valid = resp_addr + len - 1;
c0c050c5 4158 } else {
cc559c1a
MC
4159 int j;
4160
c0c050c5 4161 /* Check if response len is updated */
a11fa2be 4162 for (i = 0; i < tmo_count; i++) {
c0c050c5
MC
4163 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4164 HWRM_RESP_LEN_SFT;
4165 if (len)
4166 break;
9751e8e7 4167 /* on first few passes, just barely sleep */
67681d02 4168 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
9751e8e7
AG
4169 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4170 HWRM_SHORT_MAX_TIMEOUT);
4171 else
4172 usleep_range(HWRM_MIN_TIMEOUT,
4173 HWRM_MAX_TIMEOUT);
c0c050c5
MC
4174 }
4175
a11fa2be 4176 if (i >= tmo_count) {
c0c050c5 4177 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
cc559c1a
MC
4178 HWRM_TOTAL_TIMEOUT(i),
4179 le16_to_cpu(req->req_type),
8578d6c1 4180 le16_to_cpu(req->seq_id), len);
c0c050c5
MC
4181 return -1;
4182 }
4183
845adfe4 4184 /* Last byte of resp contains valid bit */
89455017 4185 valid = resp_addr + len - 1;
cc559c1a 4186 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
845adfe4
MC
4187 /* make sure we read from updated DMA memory */
4188 dma_rmb();
4189 if (*valid)
c0c050c5 4190 break;
0000b81a 4191 usleep_range(1, 5);
c0c050c5
MC
4192 }
4193
cc559c1a 4194 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
c0c050c5 4195 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
cc559c1a
MC
4196 HWRM_TOTAL_TIMEOUT(i),
4197 le16_to_cpu(req->req_type),
a8643e16 4198 le16_to_cpu(req->seq_id), len, *valid);
c0c050c5
MC
4199 return -1;
4200 }
4201 }
4202
845adfe4
MC
4203 /* Zero the valid bit for compatibility. The valid bit in an older
4204 * spec may become a new field in a newer spec. We must make sure
4205 * that a field not implemented by the old spec reads as zero.
4206 */
4207 *valid = 0;
c0c050c5 4208 rc = le16_to_cpu(resp->error_code);
fbfbc485 4209 if (rc && !silent)
c0c050c5
MC
4210 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4211 le16_to_cpu(resp->req_type),
4212 le16_to_cpu(resp->seq_id), rc);
fbfbc485
MC
4213 return rc;
4214}
4215
4216int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4217{
4218 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
c0c050c5
MC
4219}
4220
cc72f3b1
MC
4221int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4222 int timeout)
4223{
4224 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4225}
4226
c0c050c5
MC
4227int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4228{
4229 int rc;
4230
4231 mutex_lock(&bp->hwrm_cmd_lock);
4232 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4233 mutex_unlock(&bp->hwrm_cmd_lock);
4234 return rc;
4235}
4236
90e20921
MC
4237int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4238 int timeout)
4239{
4240 int rc;
4241
4242 mutex_lock(&bp->hwrm_cmd_lock);
4243 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4244 mutex_unlock(&bp->hwrm_cmd_lock);
4245 return rc;
4246}
4247
a1653b13
MC
4248int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
4249 int bmap_size)
c0c050c5
MC
4250{
4251 struct hwrm_func_drv_rgtr_input req = {0};
25be8623
MC
4252 DECLARE_BITMAP(async_events_bmap, 256);
4253 u32 *events = (u32 *)async_events_bmap;
a1653b13 4254 int i;
c0c050c5
MC
4255
4256 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4257
4258 req.enables =
a1653b13 4259 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
c0c050c5 4260
25be8623
MC
4261 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4262 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
4263 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4264
a1653b13
MC
4265 if (bmap && bmap_size) {
4266 for (i = 0; i < bmap_size; i++) {
4267 if (test_bit(i, bmap))
4268 __set_bit(i, async_events_bmap);
4269 }
4270 }
4271
25be8623
MC
4272 for (i = 0; i < 8; i++)
4273 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4274
a1653b13
MC
4275 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4276}
4277
4278static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
4279{
25e1acd6 4280 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
a1653b13 4281 struct hwrm_func_drv_rgtr_input req = {0};
25e1acd6 4282 int rc;
a1653b13
MC
4283
4284 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4285
4286 req.enables =
4287 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4288 FUNC_DRV_RGTR_REQ_ENABLES_VER);
4289
11f15ed3 4290 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
d4f52de0
MC
4291 req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
4292 req.ver_maj_8b = DRV_VER_MAJ;
4293 req.ver_min_8b = DRV_VER_MIN;
4294 req.ver_upd_8b = DRV_VER_UPD;
4295 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4296 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4297 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
c0c050c5
MC
4298
4299 if (BNXT_PF(bp)) {
9b0436c3 4300 u32 data[8];
a1653b13 4301 int i;
c0c050c5 4302
9b0436c3
MC
4303 memset(data, 0, sizeof(data));
4304 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4305 u16 cmd = bnxt_vf_req_snif[i];
4306 unsigned int bit, idx;
4307
4308 idx = cmd / 32;
4309 bit = cmd % 32;
4310 data[idx] |= 1 << bit;
4311 }
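 /* Each forwarded VF command number is recorded as one bit in the
  * 256-bit data[] bitmap (word = cmd / 32, bit = cmd % 32) and copied
  * into req.vf_req_fwd below.
  */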
c0c050c5 4312
de68f5de
MC
4313 for (i = 0; i < 8; i++)
4314 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4315
c0c050c5
MC
4316 req.enables |=
4317 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4318 }
4319
abd43a13
VD
4320 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4321 req.flags |= cpu_to_le32(
4322 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4323
25e1acd6
MC
4324 mutex_lock(&bp->hwrm_cmd_lock);
4325 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4326 if (rc)
4327 rc = -EIO;
4328 else if (resp->flags &
4329 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4330 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4331 mutex_unlock(&bp->hwrm_cmd_lock);
4332 return rc;
c0c050c5
MC
4333}
4334
be58a0da
JH
4335static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4336{
4337 struct hwrm_func_drv_unrgtr_input req = {0};
4338
4339 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4340 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4341}
4342
c0c050c5
MC
4343static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4344{
4345 u32 rc = 0;
4346 struct hwrm_tunnel_dst_port_free_input req = {0};
4347
4348 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4349 req.tunnel_type = tunnel_type;
4350
4351 switch (tunnel_type) {
4352 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4353 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4354 break;
4355 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4356 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4357 break;
4358 default:
4359 break;
4360 }
4361
4362 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4363 if (rc)
4364 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4365 rc);
4366 return rc;
4367}
4368
4369static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4370 u8 tunnel_type)
4371{
4372 u32 rc = 0;
4373 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4374 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4375
4376 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4377
4378 req.tunnel_type = tunnel_type;
4379 req.tunnel_dst_port_val = port;
4380
4381 mutex_lock(&bp->hwrm_cmd_lock);
4382 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4383 if (rc) {
4384 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4385 rc);
4386 goto err_out;
4387 }
4388
57aac71b
CJ
4389 switch (tunnel_type) {
4390 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
c0c050c5 4391 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
57aac71b
CJ
4392 break;
4393 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
c0c050c5 4394 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
57aac71b
CJ
4395 break;
4396 default:
4397 break;
4398 }
4399
c0c050c5
MC
4400err_out:
4401 mutex_unlock(&bp->hwrm_cmd_lock);
4402 return rc;
4403}
4404
4405static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4406{
4407 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4408 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4409
4410 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
c193554e 4411 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
c0c050c5
MC
4412
4413 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4414 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4415 req.mask = cpu_to_le32(vnic->rx_mask);
4416 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4417}
4418
4419#ifdef CONFIG_RFS_ACCEL
4420static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4421 struct bnxt_ntuple_filter *fltr)
4422{
4423 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4424
4425 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4426 req.ntuple_filter_id = fltr->filter_id;
4427 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4428}
4429
4430#define BNXT_NTP_FLTR_FLAGS \
4431 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4432 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4433 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4434 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4435 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4436 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4437 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4438 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4439 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4440 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4441 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4442 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4443 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
c193554e 4444 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
c0c050c5 4445
61aad724
MC
4446#define BNXT_NTP_TUNNEL_FLTR_FLAG \
4447 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4448
c0c050c5
MC
4449static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4450 struct bnxt_ntuple_filter *fltr)
4451{
c0c050c5 4452 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
5c209fc8 4453 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
c0c050c5 4454 struct flow_keys *keys = &fltr->fkeys;
ac33906c
MC
4455 struct bnxt_vnic_info *vnic;
4456 u32 dst_ena = 0;
5c209fc8 4457 int rc = 0;
c0c050c5
MC
4458
4459 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
a54c4d74 4460 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
c0c050c5 4461
ac33906c
MC
4462 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) {
4463 dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
4464 req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
4465 vnic = &bp->vnic_info[0];
4466 } else {
4467 vnic = &bp->vnic_info[fltr->rxq + 1];
4468 }
4469 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4470 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
c0c050c5
MC
4471
4472 req.ethertype = htons(ETH_P_IP);
4473 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
c193554e 4474 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
c0c050c5
MC
4475 req.ip_protocol = keys->basic.ip_proto;
4476
dda0e746
MC
4477 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4478 int i;
4479
4480 req.ethertype = htons(ETH_P_IPV6);
4481 req.ip_addr_type =
4482 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4483 *(struct in6_addr *)&req.src_ipaddr[0] =
4484 keys->addrs.v6addrs.src;
4485 *(struct in6_addr *)&req.dst_ipaddr[0] =
4486 keys->addrs.v6addrs.dst;
4487 for (i = 0; i < 4; i++) {
4488 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4489 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4490 }
4491 } else {
4492 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4493 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4494 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4495 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4496 }
61aad724
MC
4497 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4498 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4499 req.tunnel_type =
4500 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4501 }
c0c050c5
MC
4502
4503 req.src_port = keys->ports.src;
4504 req.src_port_mask = cpu_to_be16(0xffff);
4505 req.dst_port = keys->ports.dst;
4506 req.dst_port_mask = cpu_to_be16(0xffff);
4507
c0c050c5
MC
4508 mutex_lock(&bp->hwrm_cmd_lock);
4509 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5c209fc8
VD
4510 if (!rc) {
4511 resp = bnxt_get_hwrm_resp_addr(bp, &req);
c0c050c5 4512 fltr->filter_id = resp->ntuple_filter_id;
5c209fc8 4513 }
c0c050c5
MC
4514 mutex_unlock(&bp->hwrm_cmd_lock);
4515 return rc;
4516}
4517#endif
4518
4519static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4520 u8 *mac_addr)
4521{
4522 u32 rc = 0;
4523 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4524 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4525
4526 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
dc52c6c7
PS
4527 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4528 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4529 req.flags |=
4530 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
c193554e 4531 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
c0c050c5
MC
4532 req.enables =
4533 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
c193554e 4534 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
c0c050c5
MC
4535 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4536 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4537 req.l2_addr_mask[0] = 0xff;
4538 req.l2_addr_mask[1] = 0xff;
4539 req.l2_addr_mask[2] = 0xff;
4540 req.l2_addr_mask[3] = 0xff;
4541 req.l2_addr_mask[4] = 0xff;
4542 req.l2_addr_mask[5] = 0xff;
4543
4544 mutex_lock(&bp->hwrm_cmd_lock);
4545 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4546 if (!rc)
4547 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4548 resp->l2_filter_id;
4549 mutex_unlock(&bp->hwrm_cmd_lock);
4550 return rc;
4551}
4552
4553static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4554{
4555 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4556 int rc = 0;
4557
4558 /* Any associated ntuple filters will also be cleared by firmware. */
4559 mutex_lock(&bp->hwrm_cmd_lock);
4560 for (i = 0; i < num_of_vnics; i++) {
4561 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4562
4563 for (j = 0; j < vnic->uc_filter_count; j++) {
4564 struct hwrm_cfa_l2_filter_free_input req = {0};
4565
4566 bnxt_hwrm_cmd_hdr_init(bp, &req,
4567 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4568
4569 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4570
4571 rc = _hwrm_send_message(bp, &req, sizeof(req),
4572 HWRM_CMD_TIMEOUT);
4573 }
4574 vnic->uc_filter_count = 0;
4575 }
4576 mutex_unlock(&bp->hwrm_cmd_lock);
4577
4578 return rc;
4579}
4580
4581static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4582{
4583 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
79632e9b 4584 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
c0c050c5
MC
4585 struct hwrm_vnic_tpa_cfg_input req = {0};
4586
3c4fe80b
MC
4587 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4588 return 0;
4589
c0c050c5
MC
4590 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4591
4592 if (tpa_flags) {
4593 u16 mss = bp->dev->mtu - 40;
4594 u32 nsegs, n, segs = 0, flags;
4595
4596 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4597 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4598 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4599 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4600 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4601 if (tpa_flags & BNXT_FLAG_GRO)
4602 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4603
4604 req.flags = cpu_to_le32(flags);
4605
4606 req.enables =
4607 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
c193554e
MC
4608 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4609 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
c0c050c5
MC
4610
4611 /* The number of aggregation segments is in log2 units, and the
4612 * first packet is not counted as part of these units.
4613 */
2839f28b
MC
4614 if (mss <= BNXT_RX_PAGE_SIZE) {
4615 n = BNXT_RX_PAGE_SIZE / mss;
c0c050c5
MC
4616 nsegs = (MAX_SKB_FRAGS - 1) * n;
4617 } else {
2839f28b
MC
4618 n = mss / BNXT_RX_PAGE_SIZE;
4619 if (mss & (BNXT_RX_PAGE_SIZE - 1))
c0c050c5
MC
4620 n++;
4621 nsegs = (MAX_SKB_FRAGS - n) / n;
4622 }
4623
79632e9b
MC
4624 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4625 segs = MAX_TPA_SEGS_P5;
4626 max_aggs = bp->max_tpa;
4627 } else {
4628 segs = ilog2(nsegs);
4629 }
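 /* Example (assuming a 4K BNXT_RX_PAGE_SIZE and MAX_SKB_FRAGS of 17):
  * a 1500-byte MTU gives mss = 1460, n = 4096 / 1460 = 2,
  * nsegs = (17 - 1) * 2 = 32, so max_agg_segs = ilog2(32) = 5 on
  * non-P5 chips.
  */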
c0c050c5 4630 req.max_agg_segs = cpu_to_le16(segs);
79632e9b 4631 req.max_aggs = cpu_to_le16(max_aggs);
c193554e
MC
4632
4633 req.min_agg_len = cpu_to_le32(512);
c0c050c5
MC
4634 }
4635 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4636
4637 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4638}
4639
2c61d211
MC
4640static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4641{
4642 struct bnxt_ring_grp_info *grp_info;
4643
4644 grp_info = &bp->grp_info[ring->grp_idx];
4645 return grp_info->cp_fw_ring_id;
4646}
4647
4648static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4649{
4650 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4651 struct bnxt_napi *bnapi = rxr->bnapi;
4652 struct bnxt_cp_ring_info *cpr;
4653
4654 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4655 return cpr->cp_ring_struct.fw_ring_id;
4656 } else {
4657 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4658 }
4659}
4660
4661static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4662{
4663 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4664 struct bnxt_napi *bnapi = txr->bnapi;
4665 struct bnxt_cp_ring_info *cpr;
4666
4667 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4668 return cpr->cp_ring_struct.fw_ring_id;
4669 } else {
4670 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4671 }
4672}
4673
c0c050c5
MC
4674static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4675{
4676 u32 i, j, max_rings;
4677 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4678 struct hwrm_vnic_rss_cfg_input req = {0};
4679
7b3af4f7
MC
4680 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4681 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
c0c050c5
MC
4682 return 0;
4683
4684 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4685 if (set_rss) {
87da7f79 4686 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
50f011b6 4687 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
dc52c6c7
PS
4688 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4689 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4690 max_rings = bp->rx_nr_rings - 1;
4691 else
4692 max_rings = bp->rx_nr_rings;
4693 } else {
c0c050c5 4694 max_rings = 1;
dc52c6c7 4695 }
c0c050c5
MC
4696
4697 /* Fill the RSS indirection table with ring group ids */
4698 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4699 if (j == max_rings)
4700 j = 0;
4701 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4702 }
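 /* e.g. with 4 RX rings the HW_HASH_INDEX_SIZE entries above cycle
  * through fw_grp_ids[0..3], spreading RSS hash buckets evenly across
  * the rings.
  */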
4703
4704 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4705 req.hash_key_tbl_addr =
4706 cpu_to_le64(vnic->rss_hash_key_dma_addr);
4707 }
94ce9caa 4708 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
c0c050c5
MC
4709 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4710}
4711
7b3af4f7
MC
4712static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4713{
4714 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4715 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4716 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4717 struct hwrm_vnic_rss_cfg_input req = {0};
4718
4719 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4720 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4721 if (!set_rss) {
4722 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4723 return 0;
4724 }
4725 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4726 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4727 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4728 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4729 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
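 /* Each P5 RSS context covers 64 table entries, and each entry is a
  * (rx ring id, companion completion ring id) pair, so the loop below
  * writes two __le16 values per slot and wraps the rx ring index back
  * to 0 once every ring has been used.
  */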
4730 for (i = 0, k = 0; i < nr_ctxs; i++) {
4731 __le16 *ring_tbl = vnic->rss_table;
4732 int rc;
4733
4734 req.ring_table_pair_index = i;
4735 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4736 for (j = 0; j < 64; j++) {
4737 u16 ring_id;
4738
4739 ring_id = rxr->rx_ring_struct.fw_ring_id;
4740 *ring_tbl++ = cpu_to_le16(ring_id);
4741 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4742 *ring_tbl++ = cpu_to_le16(ring_id);
4743 rxr++;
4744 k++;
4745 if (k == max_rings) {
4746 k = 0;
4747 rxr = &bp->rx_ring[0];
4748 }
4749 }
4750 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4751 if (rc)
4752 return -EIO;
4753 }
4754 return 0;
4755}
4756
c0c050c5
MC
4757static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4758{
4759 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4760 struct hwrm_vnic_plcmodes_cfg_input req = {0};
4761
4762 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4763 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4764 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4765 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4766 req.enables =
4767 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4768 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4769 /* thresholds not implemented in firmware yet */
4770 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4771 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4772 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4773 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4774}
4775
94ce9caa
PS
4776static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4777 u16 ctx_idx)
c0c050c5
MC
4778{
4779 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4780
4781 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4782 req.rss_cos_lb_ctx_id =
94ce9caa 4783 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
c0c050c5
MC
4784
4785 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
94ce9caa 4786 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
c0c050c5
MC
4787}
4788
4789static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4790{
94ce9caa 4791 int i, j;
c0c050c5
MC
4792
4793 for (i = 0; i < bp->nr_vnics; i++) {
4794 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4795
94ce9caa
PS
4796 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4797 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4798 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4799 }
c0c050c5
MC
4800 }
4801 bp->rsscos_nr_ctxs = 0;
4802}
4803
94ce9caa 4804static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
c0c050c5
MC
4805{
4806 int rc;
4807 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4808 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4809 bp->hwrm_cmd_resp_addr;
4810
4811 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4812 -1);
4813
4814 mutex_lock(&bp->hwrm_cmd_lock);
4815 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4816 if (!rc)
94ce9caa 4817 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
c0c050c5
MC
4818 le16_to_cpu(resp->rss_cos_lb_ctx_id);
4819 mutex_unlock(&bp->hwrm_cmd_lock);
4820
4821 return rc;
4822}
4823
abe93ad2
MC
4824static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4825{
4826 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4827 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4828 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4829}
4830
a588e458 4831int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
c0c050c5 4832{
b81a90d3 4833 unsigned int ring = 0, grp_idx;
c0c050c5
MC
4834 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4835 struct hwrm_vnic_cfg_input req = {0};
cf6645f8 4836 u16 def_vlan = 0;
c0c050c5
MC
4837
4838 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
dc52c6c7 4839
7b3af4f7
MC
4840 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4841 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4842
4843 req.default_rx_ring_id =
4844 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
4845 req.default_cmpl_ring_id =
4846 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
4847 req.enables =
4848 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
4849 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
4850 goto vnic_mru;
4851 }
dc52c6c7 4852 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
c0c050c5 4853 /* Only RSS is supported for now; TBD: COS & LB */
dc52c6c7
PS
4854 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
4855 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4856 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4857 VNIC_CFG_REQ_ENABLES_MRU);
ae10ae74
MC
4858 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
4859 req.rss_rule =
4860 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
4861 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4862 VNIC_CFG_REQ_ENABLES_MRU);
4863 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
dc52c6c7
PS
4864 } else {
4865 req.rss_rule = cpu_to_le16(0xffff);
4866 }
94ce9caa 4867
dc52c6c7
PS
4868 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
4869 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
94ce9caa
PS
4870 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
4871 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
4872 } else {
4873 req.cos_rule = cpu_to_le16(0xffff);
4874 }
4875
c0c050c5 4876 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
b81a90d3 4877 ring = 0;
c0c050c5 4878 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
b81a90d3 4879 ring = vnic_id - 1;
76595193
PS
4880 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
4881 ring = bp->rx_nr_rings - 1;
c0c050c5 4882
b81a90d3 4883 grp_idx = bp->rx_ring[ring].bnapi->index;
c0c050c5 4884 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
c0c050c5 4885 req.lb_rule = cpu_to_le16(0xffff);
7b3af4f7 4886vnic_mru:
c0c050c5
MC
4887 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
4888 VLAN_HLEN);
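 /* e.g. a standard 1500-byte MTU yields an MRU of
  * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
  */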
4889
7b3af4f7 4890 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
cf6645f8
MC
4891#ifdef CONFIG_BNXT_SRIOV
4892 if (BNXT_VF(bp))
4893 def_vlan = bp->vf.vlan;
4894#endif
4895 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
c0c050c5 4896 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
a588e458 4897 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
abe93ad2 4898 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
c0c050c5
MC
4899
4900 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4901}
4902
4903static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
4904{
4905 u32 rc = 0;
4906
4907 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
4908 struct hwrm_vnic_free_input req = {0};
4909
4910 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
4911 req.vnic_id =
4912 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
4913
4914 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4915 if (rc)
4916 return rc;
4917 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
4918 }
4919 return rc;
4920}
4921
4922static void bnxt_hwrm_vnic_free(struct bnxt *bp)
4923{
4924 u16 i;
4925
4926 for (i = 0; i < bp->nr_vnics; i++)
4927 bnxt_hwrm_vnic_free_one(bp, i);
4928}
4929
b81a90d3
MC
4930static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
4931 unsigned int start_rx_ring_idx,
4932 unsigned int nr_rings)
c0c050c5 4933{
b81a90d3
MC
4934 int rc = 0;
4935 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
c0c050c5
MC
4936 struct hwrm_vnic_alloc_input req = {0};
4937 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
44c6f72a
MC
4938 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4939
4940 if (bp->flags & BNXT_FLAG_CHIP_P5)
4941 goto vnic_no_ring_grps;
c0c050c5
MC
4942
4943 /* map ring groups to this vnic */
b81a90d3
MC
4944 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
4945 grp_idx = bp->rx_ring[i].bnapi->index;
4946 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
c0c050c5 4947 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
b81a90d3 4948 j, nr_rings);
c0c050c5
MC
4949 break;
4950 }
44c6f72a 4951 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
c0c050c5
MC
4952 }
4953
44c6f72a
MC
4954vnic_no_ring_grps:
4955 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
4956 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
c0c050c5
MC
4957 if (vnic_id == 0)
4958 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
4959
4960 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
4961
4962 mutex_lock(&bp->hwrm_cmd_lock);
4963 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4964 if (!rc)
44c6f72a 4965 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
c0c050c5
MC
4966 mutex_unlock(&bp->hwrm_cmd_lock);
4967 return rc;
4968}
4969
8fdefd63
MC
4970static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
4971{
4972 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4973 struct hwrm_vnic_qcaps_input req = {0};
4974 int rc;
4975
4976 if (bp->hwrm_spec_code < 0x10600)
4977 return 0;
4978
4979 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4980 mutex_lock(&bp->hwrm_cmd_lock);
4981 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4982 if (!rc) {
abe93ad2
MC
4983 u32 flags = le32_to_cpu(resp->flags);
4984
41e8d798
MC
4985 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
4986 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
8fdefd63 4987 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
abe93ad2
MC
4988 if (flags &
4989 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
4990 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
79632e9b 4991 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
8fdefd63
MC
4992 }
4993 mutex_unlock(&bp->hwrm_cmd_lock);
4994 return rc;
4995}
4996
c0c050c5
MC
4997static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
4998{
4999 u16 i;
5000 u32 rc = 0;
5001
44c6f72a
MC
5002 if (bp->flags & BNXT_FLAG_CHIP_P5)
5003 return 0;
5004
c0c050c5
MC
5005 mutex_lock(&bp->hwrm_cmd_lock);
5006 for (i = 0; i < bp->rx_nr_rings; i++) {
5007 struct hwrm_ring_grp_alloc_input req = {0};
5008 struct hwrm_ring_grp_alloc_output *resp =
5009 bp->hwrm_cmd_resp_addr;
b81a90d3 5010 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
c0c050c5
MC
5011
5012 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5013
b81a90d3
MC
5014 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5015 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5016 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5017 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
c0c050c5
MC
5018
5019 rc = _hwrm_send_message(bp, &req, sizeof(req),
5020 HWRM_CMD_TIMEOUT);
5021 if (rc)
5022 break;
5023
b81a90d3
MC
5024 bp->grp_info[grp_idx].fw_grp_id =
5025 le32_to_cpu(resp->ring_group_id);
c0c050c5
MC
5026 }
5027 mutex_unlock(&bp->hwrm_cmd_lock);
5028 return rc;
5029}
5030
5031static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5032{
5033 u16 i;
5034 u32 rc = 0;
5035 struct hwrm_ring_grp_free_input req = {0};
5036
44c6f72a 5037 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
5038 return 0;
5039
5040 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5041
5042 mutex_lock(&bp->hwrm_cmd_lock);
5043 for (i = 0; i < bp->cp_nr_rings; i++) {
5044 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5045 continue;
5046 req.ring_group_id =
5047 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5048
5049 rc = _hwrm_send_message(bp, &req, sizeof(req),
5050 HWRM_CMD_TIMEOUT);
5051 if (rc)
5052 break;
5053 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5054 }
5055 mutex_unlock(&bp->hwrm_cmd_lock);
5056 return rc;
5057}
5058
5059static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5060 struct bnxt_ring_struct *ring,
9899bb59 5061 u32 ring_type, u32 map_index)
c0c050c5
MC
5062{
5063 int rc = 0, err = 0;
5064 struct hwrm_ring_alloc_input req = {0};
5065 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6fe19886 5066 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
9899bb59 5067 struct bnxt_ring_grp_info *grp_info;
c0c050c5
MC
5068 u16 ring_id;
5069
5070 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5071
5072 req.enables = 0;
6fe19886
MC
5073 if (rmem->nr_pages > 1) {
5074 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
c0c050c5
MC
5075 /* Page size is in log2 units */
5076 req.page_size = BNXT_PAGE_SHIFT;
5077 req.page_tbl_depth = 1;
5078 } else {
6fe19886 5079 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
c0c050c5
MC
5080 }
5081 req.fbo = 0;
5082 /* Association of ring index with doorbell index and MSIX number */
5083 req.logical_id = cpu_to_le16(map_index);
5084
5085 switch (ring_type) {
2c61d211
MC
5086 case HWRM_RING_ALLOC_TX: {
5087 struct bnxt_tx_ring_info *txr;
5088
5089 txr = container_of(ring, struct bnxt_tx_ring_info,
5090 tx_ring_struct);
c0c050c5
MC
5091 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5092 /* Association of transmit ring with completion ring */
9899bb59 5093 grp_info = &bp->grp_info[ring->grp_idx];
2c61d211 5094 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
c0c050c5 5095 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
9899bb59 5096 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
c0c050c5
MC
5097 req.queue_id = cpu_to_le16(ring->queue_id);
5098 break;
2c61d211 5099 }
c0c050c5
MC
5100 case HWRM_RING_ALLOC_RX:
5101 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5102 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
23aefdd7
MC
5103 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5104 u16 flags = 0;
5105
5106 /* Association of rx ring with stats context */
5107 grp_info = &bp->grp_info[ring->grp_idx];
5108 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5109 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5110 req.enables |= cpu_to_le32(
5111 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5112 if (NET_IP_ALIGN == 2)
5113 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5114 req.flags = cpu_to_le16(flags);
5115 }
c0c050c5
MC
5116 break;
5117 case HWRM_RING_ALLOC_AGG:
23aefdd7
MC
5118 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5119 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5120 /* Association of agg ring with rx ring */
5121 grp_info = &bp->grp_info[ring->grp_idx];
5122 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5123 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5124 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5125 req.enables |= cpu_to_le32(
5126 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5127 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5128 } else {
5129 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5130 }
c0c050c5
MC
5131 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5132 break;
5133 case HWRM_RING_ALLOC_CMPL:
bac9a7e0 5134 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
c0c050c5 5135 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
23aefdd7
MC
5136 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5137 /* Association of cp ring with nq */
5138 grp_info = &bp->grp_info[map_index];
5139 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5140 req.cq_handle = cpu_to_le64(ring->handle);
5141 req.enables |= cpu_to_le32(
5142 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5143 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5144 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5145 }
5146 break;
5147 case HWRM_RING_ALLOC_NQ:
5148 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5149 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
c0c050c5
MC
5150 if (bp->flags & BNXT_FLAG_USING_MSIX)
5151 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5152 break;
5153 default:
5154 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5155 ring_type);
5156 return -1;
5157 }
5158
5159 mutex_lock(&bp->hwrm_cmd_lock);
5160 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5161 err = le16_to_cpu(resp->error_code);
5162 ring_id = le16_to_cpu(resp->ring_id);
5163 mutex_unlock(&bp->hwrm_cmd_lock);
5164
5165 if (rc || err) {
2727c888
MC
5166 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5167 ring_type, rc, err);
5168 return -EIO;
c0c050c5
MC
5169 }
5170 ring->fw_ring_id = ring_id;
5171 return rc;
5172}
5173
486b5c22
MC
5174static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5175{
5176 int rc;
5177
5178 if (BNXT_PF(bp)) {
5179 struct hwrm_func_cfg_input req = {0};
5180
5181 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5182 req.fid = cpu_to_le16(0xffff);
5183 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5184 req.async_event_cr = cpu_to_le16(idx);
5185 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5186 } else {
5187 struct hwrm_func_vf_cfg_input req = {0};
5188
5189 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5190 req.enables =
5191 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5192 req.async_event_cr = cpu_to_le16(idx);
5193 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5194 }
5195 return rc;
5196}
5197
697197e5
MC
5198static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5199 u32 map_idx, u32 xid)
5200{
5201 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5202 if (BNXT_PF(bp))
5203 db->doorbell = bp->bar1 + 0x10000;
5204 else
5205 db->doorbell = bp->bar1 + 0x4000;
5206 switch (ring_type) {
5207 case HWRM_RING_ALLOC_TX:
5208 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5209 break;
5210 case HWRM_RING_ALLOC_RX:
5211 case HWRM_RING_ALLOC_AGG:
5212 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5213 break;
5214 case HWRM_RING_ALLOC_CMPL:
5215 db->db_key64 = DBR_PATH_L2;
5216 break;
5217 case HWRM_RING_ALLOC_NQ:
5218 db->db_key64 = DBR_PATH_L2;
5219 break;
5220 }
5221 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5222 } else {
5223 db->doorbell = bp->bar1 + map_idx * 0x80;
5224 switch (ring_type) {
5225 case HWRM_RING_ALLOC_TX:
5226 db->db_key32 = DB_KEY_TX;
5227 break;
5228 case HWRM_RING_ALLOC_RX:
5229 case HWRM_RING_ALLOC_AGG:
5230 db->db_key32 = DB_KEY_RX;
5231 break;
5232 case HWRM_RING_ALLOC_CMPL:
5233 db->db_key32 = DB_KEY_CP;
5234 break;
5235 }
5236 }
5237}
5238
c0c050c5
MC
5239static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5240{
5241 int i, rc = 0;
697197e5 5242 u32 type;
c0c050c5 5243
23aefdd7
MC
5244 if (bp->flags & BNXT_FLAG_CHIP_P5)
5245 type = HWRM_RING_ALLOC_NQ;
5246 else
5247 type = HWRM_RING_ALLOC_CMPL;
edd0c2cc
MC
5248 for (i = 0; i < bp->cp_nr_rings; i++) {
5249 struct bnxt_napi *bnapi = bp->bnapi[i];
5250 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5251 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9899bb59 5252 u32 map_idx = ring->map_idx;
5e66e35a 5253 unsigned int vector;
c0c050c5 5254
5e66e35a
MC
5255 vector = bp->irq_tbl[map_idx].vector;
5256 disable_irq_nosync(vector);
697197e5 5257 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5e66e35a
MC
5258 if (rc) {
5259 enable_irq(vector);
edd0c2cc 5260 goto err_out;
5e66e35a 5261 }
697197e5
MC
5262 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5263 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5e66e35a 5264 enable_irq(vector);
edd0c2cc 5265 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
486b5c22
MC
5266
5267 if (!i) {
5268 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5269 if (rc)
5270 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5271 }
c0c050c5
MC
5272 }
5273
697197e5 5274 type = HWRM_RING_ALLOC_TX;
edd0c2cc 5275 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5276 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3e08b184
MC
5277 struct bnxt_ring_struct *ring;
5278 u32 map_idx;
c0c050c5 5279
3e08b184
MC
5280 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5281 struct bnxt_napi *bnapi = txr->bnapi;
5282 struct bnxt_cp_ring_info *cpr, *cpr2;
5283 u32 type2 = HWRM_RING_ALLOC_CMPL;
5284
5285 cpr = &bnapi->cp_ring;
5286 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5287 ring = &cpr2->cp_ring_struct;
5288 ring->handle = BNXT_TX_HDL;
5289 map_idx = bnapi->index;
5290 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5291 if (rc)
5292 goto err_out;
5293 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5294 ring->fw_ring_id);
5295 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5296 }
5297 ring = &txr->tx_ring_struct;
5298 map_idx = i;
697197e5 5299 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5300 if (rc)
5301 goto err_out;
697197e5 5302 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
c0c050c5
MC
5303 }
5304
697197e5 5305 type = HWRM_RING_ALLOC_RX;
edd0c2cc 5306 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5307 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5308 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3e08b184
MC
5309 struct bnxt_napi *bnapi = rxr->bnapi;
5310 u32 map_idx = bnapi->index;
c0c050c5 5311
697197e5 5312 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
5313 if (rc)
5314 goto err_out;
697197e5
MC
5315 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5316 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 5317 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3e08b184
MC
5318 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5319 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5320 u32 type2 = HWRM_RING_ALLOC_CMPL;
5321 struct bnxt_cp_ring_info *cpr2;
5322
5323 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5324 ring = &cpr2->cp_ring_struct;
5325 ring->handle = BNXT_RX_HDL;
5326 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5327 if (rc)
5328 goto err_out;
5329 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5330 ring->fw_ring_id);
5331 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5332 }
c0c050c5
MC
5333 }
5334
5335 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
697197e5 5336 type = HWRM_RING_ALLOC_AGG;
c0c050c5 5337 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5338 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
5339 struct bnxt_ring_struct *ring =
5340 &rxr->rx_agg_ring_struct;
9899bb59 5341 u32 grp_idx = ring->grp_idx;
b81a90d3 5342 u32 map_idx = grp_idx + bp->rx_nr_rings;
c0c050c5 5343
697197e5 5344 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
c0c050c5
MC
5345 if (rc)
5346 goto err_out;
5347
697197e5
MC
5348 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5349 ring->fw_ring_id);
5350 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
b81a90d3 5351 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
5352 }
5353 }
5354err_out:
5355 return rc;
5356}
5357
5358static int hwrm_ring_free_send_msg(struct bnxt *bp,
5359 struct bnxt_ring_struct *ring,
5360 u32 ring_type, int cmpl_ring_id)
5361{
5362 int rc;
5363 struct hwrm_ring_free_input req = {0};
5364 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5365 u16 error_code;
5366
74608fc9 5367 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
c0c050c5
MC
5368 req.ring_type = ring_type;
5369 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5370
5371 mutex_lock(&bp->hwrm_cmd_lock);
5372 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5373 error_code = le16_to_cpu(resp->error_code);
5374 mutex_unlock(&bp->hwrm_cmd_lock);
5375
5376 if (rc || error_code) {
2727c888
MC
5377 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5378 ring_type, rc, error_code);
5379 return -EIO;
c0c050c5
MC
5380 }
5381 return 0;
5382}
5383
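/* Free all rings in firmware: TX, RX and aggregation rings first,
 * then, after interrupts are disabled, the completion (or NQ) rings.
 * When close_path is set, the associated completion ring ID is passed
 * in each request; otherwise INVALID_HW_RING_ID is used.
 */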
edd0c2cc 5384static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 5385{
23aefdd7 5386 u32 type;
edd0c2cc 5387 int i;
c0c050c5
MC
5388
5389 if (!bp->bnapi)
edd0c2cc 5390 return;
c0c050c5 5391
edd0c2cc 5392 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5393 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 5394 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
edd0c2cc
MC
5395
5396 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5397 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5398
edd0c2cc
MC
5399 hwrm_ring_free_send_msg(bp, ring,
5400 RING_FREE_REQ_RING_TYPE_TX,
5401 close_path ? cmpl_ring_id :
5402 INVALID_HW_RING_ID);
5403 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5404 }
5405 }
5406
edd0c2cc 5407 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5408 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5409 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3 5410 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5411
5412 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5413 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5414
edd0c2cc
MC
5415 hwrm_ring_free_send_msg(bp, ring,
5416 RING_FREE_REQ_RING_TYPE_RX,
5417 close_path ? cmpl_ring_id :
5418 INVALID_HW_RING_ID);
5419 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5420 bp->grp_info[grp_idx].rx_fw_ring_id =
5421 INVALID_HW_RING_ID;
c0c050c5
MC
5422 }
5423 }
5424
23aefdd7
MC
5425 if (bp->flags & BNXT_FLAG_CHIP_P5)
5426 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5427 else
5428 type = RING_FREE_REQ_RING_TYPE_RX;
edd0c2cc 5429 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5430 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5431 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
b81a90d3 5432 u32 grp_idx = rxr->bnapi->index;
edd0c2cc
MC
5433
5434 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1f83391b
MC
5435 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5436
23aefdd7 5437 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5438 close_path ? cmpl_ring_id :
5439 INVALID_HW_RING_ID);
5440 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5441 bp->grp_info[grp_idx].agg_fw_ring_id =
5442 INVALID_HW_RING_ID;
c0c050c5
MC
5443 }
5444 }
5445
9d8bc097
MC
5446 /* The completion rings are about to be freed. After that the
5447 * IRQ doorbell will not work anymore. So we need to disable
5448 * IRQ here.
5449 */
5450 bnxt_disable_int_sync(bp);
5451
23aefdd7
MC
5452 if (bp->flags & BNXT_FLAG_CHIP_P5)
5453 type = RING_FREE_REQ_RING_TYPE_NQ;
5454 else
5455 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
edd0c2cc
MC
5456 for (i = 0; i < bp->cp_nr_rings; i++) {
5457 struct bnxt_napi *bnapi = bp->bnapi[i];
5458 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3e08b184
MC
5459 struct bnxt_ring_struct *ring;
5460 int j;
edd0c2cc 5461
3e08b184
MC
5462 for (j = 0; j < 2; j++) {
5463 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5464
5465 if (cpr2) {
5466 ring = &cpr2->cp_ring_struct;
5467 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5468 continue;
5469 hwrm_ring_free_send_msg(bp, ring,
5470 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5471 INVALID_HW_RING_ID);
5472 ring->fw_ring_id = INVALID_HW_RING_ID;
5473 }
5474 }
5475 ring = &cpr->cp_ring_struct;
edd0c2cc 5476 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
23aefdd7 5477 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5478 INVALID_HW_RING_ID);
5479 ring->fw_ring_id = INVALID_HW_RING_ID;
5480 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5481 }
5482 }
c0c050c5
MC
5483}
5484
41e8d798
MC
5485static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5486 bool shared);
5487
674f50a5
MC
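/* Query firmware (HWRM_FUNC_QCFG) for the resources currently
 * reserved for this function and cache them in bp->hw_resc.  On P5
 * chips the reserved RX/TX rings are trimmed to fit within the
 * number of reserved completion rings.
 */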
5488static int bnxt_hwrm_get_rings(struct bnxt *bp)
5489{
5490 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5491 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5492 struct hwrm_func_qcfg_input req = {0};
5493 int rc;
5494
5495 if (bp->hwrm_spec_code < 0x10601)
5496 return 0;
5497
5498 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5499 req.fid = cpu_to_le16(0xffff);
5500 mutex_lock(&bp->hwrm_cmd_lock);
5501 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5502 if (rc) {
5503 mutex_unlock(&bp->hwrm_cmd_lock);
5504 return -EIO;
5505 }
5506
5507 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
f1ca94de 5508 if (BNXT_NEW_RM(bp)) {
674f50a5
MC
5509 u16 cp, stats;
5510
5511 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5512 hw_resc->resv_hw_ring_grps =
5513 le32_to_cpu(resp->alloc_hw_ring_grps);
5514 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5515 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5516 stats = le16_to_cpu(resp->alloc_stat_ctx);
75720e63 5517 hw_resc->resv_irqs = cp;
41e8d798
MC
5518 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5519 int rx = hw_resc->resv_rx_rings;
5520 int tx = hw_resc->resv_tx_rings;
5521
5522 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5523 rx >>= 1;
5524 if (cp < (rx + tx)) {
5525 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5526 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5527 rx <<= 1;
5528 hw_resc->resv_rx_rings = rx;
5529 hw_resc->resv_tx_rings = tx;
5530 }
75720e63 5531 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
41e8d798
MC
5532 hw_resc->resv_hw_ring_grps = rx;
5533 }
674f50a5 5534 hw_resc->resv_cp_rings = cp;
780baad4 5535 hw_resc->resv_stat_ctxs = stats;
674f50a5
MC
5536 }
5537 mutex_unlock(&bp->hwrm_cmd_lock);
5538 return 0;
5539}
5540
391be5c2
MC
5541/* Caller must hold bp->hwrm_cmd_lock */
5542int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5543{
5544 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5545 struct hwrm_func_qcfg_input req = {0};
5546 int rc;
5547
5548 if (bp->hwrm_spec_code < 0x10601)
5549 return 0;
5550
5551 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5552 req.fid = cpu_to_le16(fid);
5553 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5554 if (!rc)
5555 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5556
5557 return rc;
5558}
5559
41e8d798
MC
5560static bool bnxt_rfs_supported(struct bnxt *bp);
5561
4ed50ef4
MC
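/* Build a HWRM_FUNC_CFG request reserving the given numbers of TX/RX
 * rings, ring groups, completion rings (MSI-X/NQs on P5), statistics
 * contexts and VNICs for the PF.  Only resources with a non-zero
 * count are flagged in the enables mask.
 */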
5562static void
5563__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5564 int tx_rings, int rx_rings, int ring_grps,
780baad4 5565 int cp_rings, int stats, int vnics)
391be5c2 5566{
674f50a5 5567 u32 enables = 0;
391be5c2 5568
4ed50ef4
MC
5569 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5570 req->fid = cpu_to_le16(0xffff);
674f50a5 5571 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4ed50ef4 5572 req->num_tx_rings = cpu_to_le16(tx_rings);
f1ca94de 5573 if (BNXT_NEW_RM(bp)) {
674f50a5 5574 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
3f93cd3f 5575 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
5576 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5577 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5578 enables |= tx_rings + ring_grps ?
3f93cd3f 5579 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
5580 enables |= rx_rings ?
5581 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5582 } else {
5583 enables |= cp_rings ?
3f93cd3f 5584 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
5585 enables |= ring_grps ?
5586 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5587 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5588 }
dbe80d44 5589 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
674f50a5 5590
4ed50ef4 5591 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
5592 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5593 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5594 req->num_msix = cpu_to_le16(cp_rings);
5595 req->num_rsscos_ctxs =
5596 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5597 } else {
5598 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5599 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5600 req->num_rsscos_ctxs = cpu_to_le16(1);
5601 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5602 bnxt_rfs_supported(bp))
5603 req->num_rsscos_ctxs =
5604 cpu_to_le16(ring_grps + 1);
5605 }
780baad4 5606 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4 5607 req->num_vnics = cpu_to_le16(vnics);
674f50a5 5608 }
4ed50ef4
MC
5609 req->enables = cpu_to_le32(enables);
5610}
5611
5612static void
5613__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5614 struct hwrm_func_vf_cfg_input *req, int tx_rings,
5615 int rx_rings, int ring_grps, int cp_rings,
780baad4 5616 int stats, int vnics)
4ed50ef4
MC
5617{
5618 u32 enables = 0;
5619
5620 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5621 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
41e8d798
MC
5622 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5623 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
3f93cd3f 5624 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
41e8d798
MC
5625 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5626 enables |= tx_rings + ring_grps ?
3f93cd3f 5627 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
5628 } else {
5629 enables |= cp_rings ?
3f93cd3f 5630 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
41e8d798
MC
5631 enables |= ring_grps ?
5632 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5633 }
4ed50ef4 5634 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
41e8d798 5635 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
4ed50ef4 5636
41e8d798 5637 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
4ed50ef4
MC
5638 req->num_tx_rings = cpu_to_le16(tx_rings);
5639 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
5640 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5641 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5642 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5643 } else {
5644 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5645 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5646 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5647 }
780baad4 5648 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4
MC
5649 req->num_vnics = cpu_to_le16(vnics);
5650
5651 req->enables = cpu_to_le32(enables);
5652}
5653
5654static int
5655bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 5656 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4
MC
5657{
5658 struct hwrm_func_cfg_input req = {0};
5659 int rc;
5660
5661 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5662 cp_rings, stats, vnics);
4ed50ef4 5663 if (!req.enables)
391be5c2
MC
5664 return 0;
5665
674f50a5
MC
5666 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5667 if (rc)
5668 return -ENOMEM;
5669
5670 if (bp->hwrm_spec_code < 0x10601)
5671 bp->hw_resc.resv_tx_rings = tx_rings;
5672
5673 rc = bnxt_hwrm_get_rings(bp);
5674 return rc;
5675}
5676
5677static int
5678bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 5679 int ring_grps, int cp_rings, int stats, int vnics)
674f50a5
MC
5680{
5681 struct hwrm_func_vf_cfg_input req = {0};
674f50a5
MC
5682 int rc;
5683
f1ca94de 5684 if (!BNXT_NEW_RM(bp)) {
674f50a5 5685 bp->hw_resc.resv_tx_rings = tx_rings;
391be5c2 5686 return 0;
674f50a5 5687 }
391be5c2 5688
4ed50ef4 5689 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5690 cp_rings, stats, vnics);
391be5c2 5691 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
674f50a5
MC
5692 if (rc)
5693 return -ENOMEM;
5694
5695 rc = bnxt_hwrm_get_rings(bp);
5696 return rc;
5697}
5698
5699static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
780baad4 5700 int cp, int stat, int vnic)
674f50a5
MC
5701{
5702 if (BNXT_PF(bp))
780baad4
VV
5703 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5704 vnic);
674f50a5 5705 else
780baad4
VV
5706 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5707 vnic);
674f50a5
MC
5708}
5709
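/* Number of NQ/completion vectors currently in use, including any
 * MSI-X vectors set aside for the ULP (RDMA) driver.
 */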
b16b6891 5710int bnxt_nq_rings_in_use(struct bnxt *bp)
08654eb2
MC
5711{
5712 int cp = bp->cp_nr_rings;
5713 int ulp_msix, ulp_base;
5714
5715 ulp_msix = bnxt_get_ulp_msix_num(bp);
5716 if (ulp_msix) {
5717 ulp_base = bnxt_get_ulp_msix_base(bp);
5718 cp += ulp_msix;
5719 if ((ulp_base + ulp_msix) > cp)
5720 cp = ulp_base + ulp_msix;
5721 }
5722 return cp;
5723}
5724
c0b8cda0
MC
5725static int bnxt_cp_rings_in_use(struct bnxt *bp)
5726{
5727 int cp;
5728
5729 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5730 return bnxt_nq_rings_in_use(bp);
5731
5732 cp = bp->tx_nr_rings + bp->rx_nr_rings;
5733 return cp;
5734}
5735
780baad4
VV
5736static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5737{
d77b1ad8
MC
5738 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
5739 int cp = bp->cp_nr_rings;
5740
5741 if (!ulp_stat)
5742 return cp;
5743
5744 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
5745 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
5746
5747 return cp + ulp_stat;
780baad4
VV
5748}
5749
4e41dc5d
MC
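/* Return true if the rings, IRQs, VNICs or statistics contexts the
 * driver wants to use no longer match what has been reserved with
 * firmware, meaning __bnxt_reserve_rings() needs to be called again.
 */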
5750static bool bnxt_need_reserve_rings(struct bnxt *bp)
5751{
5752 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
fbcfc8e4 5753 int cp = bnxt_cp_rings_in_use(bp);
c0b8cda0 5754 int nq = bnxt_nq_rings_in_use(bp);
780baad4 5755 int rx = bp->rx_nr_rings, stat;
4e41dc5d
MC
5756 int vnic = 1, grp = rx;
5757
5758 if (bp->hwrm_spec_code < 0x10601)
5759 return false;
5760
5761 if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5762 return true;
5763
41e8d798 5764 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
4e41dc5d
MC
5765 vnic = rx + 1;
5766 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5767 rx <<= 1;
780baad4 5768 stat = bnxt_get_func_stat_ctxs(bp);
f1ca94de 5769 if (BNXT_NEW_RM(bp) &&
4e41dc5d 5770 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
01989c6b 5771 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
41e8d798
MC
5772 (hw_resc->resv_hw_ring_grps != grp &&
5773 !(bp->flags & BNXT_FLAG_CHIP_P5))))
4e41dc5d 5774 return true;
01989c6b
MC
5775 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
5776 hw_resc->resv_irqs != nq)
5777 return true;
4e41dc5d
MC
5778 return false;
5779}
5780
674f50a5
MC
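/* Reserve rings and related resources with firmware, then shrink the
 * driver's ring counts to whatever firmware actually granted.  If
 * fewer than two RX slots remain while aggregation rings are in use,
 * aggregation and LRO are disabled instead of failing, unless the
 * device is already up.
 */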
5781static int __bnxt_reserve_rings(struct bnxt *bp)
5782{
5783 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
c0b8cda0 5784 int cp = bnxt_nq_rings_in_use(bp);
674f50a5
MC
5785 int tx = bp->tx_nr_rings;
5786 int rx = bp->rx_nr_rings;
674f50a5 5787 int grp, rx_rings, rc;
780baad4 5788 int vnic = 1, stat;
674f50a5 5789 bool sh = false;
674f50a5 5790
4e41dc5d 5791 if (!bnxt_need_reserve_rings(bp))
674f50a5
MC
5792 return 0;
5793
5794 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5795 sh = true;
41e8d798 5796 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
674f50a5
MC
5797 vnic = rx + 1;
5798 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5799 rx <<= 1;
674f50a5 5800 grp = bp->rx_nr_rings;
780baad4 5801 stat = bnxt_get_func_stat_ctxs(bp);
674f50a5 5802
780baad4 5803 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
391be5c2
MC
5804 if (rc)
5805 return rc;
5806
674f50a5 5807 tx = hw_resc->resv_tx_rings;
f1ca94de 5808 if (BNXT_NEW_RM(bp)) {
674f50a5 5809 rx = hw_resc->resv_rx_rings;
c0b8cda0 5810 cp = hw_resc->resv_irqs;
674f50a5
MC
5811 grp = hw_resc->resv_hw_ring_grps;
5812 vnic = hw_resc->resv_vnics;
780baad4 5813 stat = hw_resc->resv_stat_ctxs;
674f50a5
MC
5814 }
5815
5816 rx_rings = rx;
5817 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5818 if (rx >= 2) {
5819 rx_rings = rx >> 1;
5820 } else {
5821 if (netif_running(bp->dev))
5822 return -ENOMEM;
5823
5824 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
5825 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
5826 bp->dev->hw_features &= ~NETIF_F_LRO;
5827 bp->dev->features &= ~NETIF_F_LRO;
5828 bnxt_set_ring_params(bp);
5829 }
5830 }
5831 rx_rings = min_t(int, rx_rings, grp);
780baad4
VV
5832 cp = min_t(int, cp, bp->cp_nr_rings);
5833 if (stat > bnxt_get_ulp_stat_ctxs(bp))
5834 stat -= bnxt_get_ulp_stat_ctxs(bp);
5835 cp = min_t(int, cp, stat);
674f50a5
MC
5836 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
5837 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5838 rx = rx_rings << 1;
5839 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
5840 bp->tx_nr_rings = tx;
5841 bp->rx_nr_rings = rx_rings;
5842 bp->cp_nr_rings = cp;
5843
780baad4 5844 if (!tx || !rx || !cp || !grp || !vnic || !stat)
674f50a5
MC
5845 return -ENOMEM;
5846
391be5c2
MC
5847 return rc;
5848}
5849
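/* Check whether a VF resource request can be satisfied by sending
 * HWRM_FUNC_VF_CFG with the *_ASSETS_TEST flags, which ask firmware
 * to validate the requested counts rather than commit them.  Returns
 * -ENOMEM if firmware rejects the request.
 */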
8f23d638 5850static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
5851 int ring_grps, int cp_rings, int stats,
5852 int vnics)
98fdbe73 5853{
8f23d638 5854 struct hwrm_func_vf_cfg_input req = {0};
6fc2ffdf 5855 u32 flags;
98fdbe73
MC
5856 int rc;
5857
f1ca94de 5858 if (!BNXT_NEW_RM(bp))
98fdbe73
MC
5859 return 0;
5860
6fc2ffdf 5861 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5862 cp_rings, stats, vnics);
8f23d638
MC
5863 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
5864 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5865 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638 5866 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
41e8d798
MC
5867 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
5868 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
5869 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5870 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8f23d638
MC
5871
5872 req.flags = cpu_to_le32(flags);
8f23d638
MC
5873 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5874 if (rc)
5875 return -ENOMEM;
5876 return 0;
5877}
5878
5879static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
5880 int ring_grps, int cp_rings, int stats,
5881 int vnics)
8f23d638
MC
5882{
5883 struct hwrm_func_cfg_input req = {0};
6fc2ffdf 5884 u32 flags;
8f23d638 5885 int rc;
98fdbe73 5886
6fc2ffdf 5887 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5888 cp_rings, stats, vnics);
8f23d638 5889 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
41e8d798 5890 if (BNXT_NEW_RM(bp)) {
8f23d638
MC
5891 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5892 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638
MC
5893 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5894 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
41e8d798 5895 if (bp->flags & BNXT_FLAG_CHIP_P5)
0b815023
MC
5896 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
5897 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
41e8d798
MC
5898 else
5899 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5900 }
6fc2ffdf 5901
8f23d638 5902 req.flags = cpu_to_le32(flags);
98fdbe73
MC
5903 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5904 if (rc)
5905 return -ENOMEM;
5906 return 0;
5907}
5908
8f23d638 5909static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
5910 int ring_grps, int cp_rings, int stats,
5911 int vnics)
8f23d638
MC
5912{
5913 if (bp->hwrm_spec_code < 0x10801)
5914 return 0;
5915
5916 if (BNXT_PF(bp))
5917 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
780baad4
VV
5918 ring_grps, cp_rings, stats,
5919 vnics);
8f23d638
MC
5920
5921 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
780baad4 5922 cp_rings, stats, vnics);
8f23d638
MC
5923}
5924
74706afa
MC
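/* Query interrupt coalescing capabilities (HWRM_RING_AGGINT_QCAPS).
 * Legacy defaults are filled in first so that firmware older than
 * spec 0x10902 keeps working with fixed limits.
 */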
5925static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
5926{
5927 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5928 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5929 struct hwrm_ring_aggint_qcaps_input req = {0};
5930 int rc;
5931
5932 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
5933 coal_cap->num_cmpl_dma_aggr_max = 63;
5934 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
5935 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
5936 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
5937 coal_cap->int_lat_tmr_min_max = 65535;
5938 coal_cap->int_lat_tmr_max_max = 65535;
5939 coal_cap->num_cmpl_aggr_int_max = 65535;
5940 coal_cap->timer_units = 80;
5941
5942 if (bp->hwrm_spec_code < 0x10902)
5943 return;
5944
5945 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
5946 mutex_lock(&bp->hwrm_cmd_lock);
5947 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5948 if (!rc) {
5949 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
58590c8d 5950 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
74706afa
MC
5951 coal_cap->num_cmpl_dma_aggr_max =
5952 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
5953 coal_cap->num_cmpl_dma_aggr_during_int_max =
5954 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
5955 coal_cap->cmpl_aggr_dma_tmr_max =
5956 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
5957 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
5958 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
5959 coal_cap->int_lat_tmr_min_max =
5960 le16_to_cpu(resp->int_lat_tmr_min_max);
5961 coal_cap->int_lat_tmr_max_max =
5962 le16_to_cpu(resp->int_lat_tmr_max_max);
5963 coal_cap->num_cmpl_aggr_int_max =
5964 le16_to_cpu(resp->num_cmpl_aggr_int_max);
5965 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
5966 }
5967 mutex_unlock(&bp->hwrm_cmd_lock);
5968}
5969
5970static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
5971{
5972 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5973
5974 return usec * 1000 / coal_cap->timer_units;
5975}
5976
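/* Translate the driver's coalescing settings (ticks and buffers) into
 * a RING_CMPL_RING_CFG_AGGINT_PARAMS request, clamping every value to
 * the limits reported by firmware.  The minimum interrupt timer is
 * set to 1/2 and the DMA buffer timer to 1/4 of the interrupt timer.
 */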
5977static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
5978 struct bnxt_coal *hw_coal,
bb053f52
MC
5979 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
5980{
74706afa
MC
5981 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5982 u32 cmpl_params = coal_cap->cmpl_params;
5983 u16 val, tmr, max, flags = 0;
f8503969
MC
5984
5985 max = hw_coal->bufs_per_record * 128;
5986 if (hw_coal->budget)
5987 max = hw_coal->bufs_per_record * hw_coal->budget;
74706afa 5988 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
f8503969
MC
5989
5990 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
5991 req->num_cmpl_aggr_int = cpu_to_le16(val);
b153cbc5 5992
74706afa 5993 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
f8503969
MC
5994 req->num_cmpl_dma_aggr = cpu_to_le16(val);
5995
74706afa
MC
5996 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
5997 coal_cap->num_cmpl_dma_aggr_during_int_max);
f8503969
MC
5998 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
5999
74706afa
MC
6000 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6001 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
f8503969
MC
6002 req->int_lat_tmr_max = cpu_to_le16(tmr);
6003
6004 /* min timer set to 1/2 of interrupt timer */
74706afa
MC
6005 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6006 val = tmr / 2;
6007 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6008 req->int_lat_tmr_min = cpu_to_le16(val);
6009 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6010 }
f8503969
MC
6011
6012 /* buf timer set to 1/4 of interrupt timer */
74706afa 6013 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
f8503969
MC
6014 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6015
74706afa
MC
6016 if (cmpl_params &
6017 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6018 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6019 val = clamp_t(u16, tmr, 1,
6020 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
 6021		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6022 req->enables |=
6023 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6024 }
f8503969 6025
74706afa
MC
6026 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6027 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6028 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6029 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
f8503969 6030 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
bb053f52 6031 req->flags = cpu_to_le16(flags);
74706afa 6032 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
bb053f52
MC
6033}
6034
58590c8d
MC
6035/* Caller holds bp->hwrm_cmd_lock */
6036static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6037 struct bnxt_coal *hw_coal)
6038{
6039 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6040 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6041 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6042 u32 nq_params = coal_cap->nq_params;
6043 u16 tmr;
6044
6045 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6046 return 0;
6047
6048 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6049 -1, -1);
6050 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6051 req.flags =
6052 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6053
6054 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6055 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6056 req.int_lat_tmr_min = cpu_to_le16(tmr);
6057 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6058 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6059}
6060
6a8788f2
AG
6061int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6062{
6063 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6064 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6065 struct bnxt_coal coal;
6a8788f2
AG
6066
 6067	/* Tick values in microseconds.
6068 * 1 coal_buf x bufs_per_record = 1 completion record.
6069 */
6070 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6071
6072 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6073 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6074
6075 if (!bnapi->rx_ring)
6076 return -ENODEV;
6077
6078 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6079 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6080
74706afa 6081 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6a8788f2 6082
2c61d211 6083 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6a8788f2
AG
6084
6085 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6086 HWRM_CMD_TIMEOUT);
6087}
6088
c0c050c5
MC
6089int bnxt_hwrm_set_coal(struct bnxt *bp)
6090{
6091 int i, rc = 0;
dfc9c94a
MC
6092 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6093 req_tx = {0}, *req;
c0c050c5 6094
dfc9c94a
MC
6095 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6096 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6097 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6098 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
c0c050c5 6099
74706afa
MC
6100 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6101 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
c0c050c5
MC
6102
6103 mutex_lock(&bp->hwrm_cmd_lock);
6104 for (i = 0; i < bp->cp_nr_rings; i++) {
dfc9c94a 6105 struct bnxt_napi *bnapi = bp->bnapi[i];
58590c8d 6106 struct bnxt_coal *hw_coal;
2c61d211 6107 u16 ring_id;
c0c050c5 6108
dfc9c94a 6109 req = &req_rx;
2c61d211
MC
6110 if (!bnapi->rx_ring) {
6111 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
dfc9c94a 6112 req = &req_tx;
2c61d211
MC
6113 } else {
6114 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6115 }
6116 req->ring_id = cpu_to_le16(ring_id);
dfc9c94a
MC
6117
6118 rc = _hwrm_send_message(bp, req, sizeof(*req),
c0c050c5
MC
6119 HWRM_CMD_TIMEOUT);
6120 if (rc)
6121 break;
58590c8d
MC
6122
6123 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6124 continue;
6125
6126 if (bnapi->rx_ring && bnapi->tx_ring) {
6127 req = &req_tx;
6128 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6129 req->ring_id = cpu_to_le16(ring_id);
6130 rc = _hwrm_send_message(bp, req, sizeof(*req),
6131 HWRM_CMD_TIMEOUT);
6132 if (rc)
6133 break;
6134 }
6135 if (bnapi->rx_ring)
6136 hw_coal = &bp->rx_coal;
6137 else
6138 hw_coal = &bp->tx_coal;
6139 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
c0c050c5
MC
6140 }
6141 mutex_unlock(&bp->hwrm_cmd_lock);
6142 return rc;
6143}
6144
6145static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6146{
6147 int rc = 0, i;
6148 struct hwrm_stat_ctx_free_input req = {0};
6149
6150 if (!bp->bnapi)
6151 return 0;
6152
3e8060fa
PS
6153 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6154 return 0;
6155
c0c050c5
MC
6156 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6157
6158 mutex_lock(&bp->hwrm_cmd_lock);
6159 for (i = 0; i < bp->cp_nr_rings; i++) {
6160 struct bnxt_napi *bnapi = bp->bnapi[i];
6161 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6162
6163 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6164 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6165
6166 rc = _hwrm_send_message(bp, &req, sizeof(req),
6167 HWRM_CMD_TIMEOUT);
6168 if (rc)
6169 break;
6170
6171 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6172 }
6173 }
6174 mutex_unlock(&bp->hwrm_cmd_lock);
6175 return rc;
6176}
6177
6178static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6179{
6180 int rc = 0, i;
6181 struct hwrm_stat_ctx_alloc_input req = {0};
6182 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6183
3e8060fa
PS
6184 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6185 return 0;
6186
c0c050c5
MC
6187 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6188
51f30785 6189 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
c0c050c5
MC
6190
6191 mutex_lock(&bp->hwrm_cmd_lock);
6192 for (i = 0; i < bp->cp_nr_rings; i++) {
6193 struct bnxt_napi *bnapi = bp->bnapi[i];
6194 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6195
6196 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
6197
6198 rc = _hwrm_send_message(bp, &req, sizeof(req),
6199 HWRM_CMD_TIMEOUT);
6200 if (rc)
6201 break;
6202
6203 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6204
6205 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6206 }
6207 mutex_unlock(&bp->hwrm_cmd_lock);
89aa8445 6208 return rc;
c0c050c5
MC
6209}
6210
cf6645f8
MC
6211static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6212{
6213 struct hwrm_func_qcfg_input req = {0};
567b2abe 6214 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9315edca 6215 u16 flags;
cf6645f8
MC
6216 int rc;
6217
6218 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6219 req.fid = cpu_to_le16(0xffff);
6220 mutex_lock(&bp->hwrm_cmd_lock);
6221 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6222 if (rc)
6223 goto func_qcfg_exit;
6224
6225#ifdef CONFIG_BNXT_SRIOV
6226 if (BNXT_VF(bp)) {
cf6645f8
MC
6227 struct bnxt_vf_info *vf = &bp->vf;
6228
6229 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6230 }
6231#endif
9315edca
MC
6232 flags = le16_to_cpu(resp->flags);
6233 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6234 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
97381a18 6235 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
9315edca 6236 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
97381a18 6237 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
9315edca
MC
6238 }
6239 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6240 bp->flags |= BNXT_FLAG_MULTI_HOST;
bc39f885 6241
567b2abe
SB
6242 switch (resp->port_partition_type) {
6243 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6244 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6245 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6246 bp->port_partition_type = resp->port_partition_type;
6247 break;
6248 }
32e8239c
MC
6249 if (bp->hwrm_spec_code < 0x10707 ||
6250 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6251 bp->br_mode = BRIDGE_MODE_VEB;
6252 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6253 bp->br_mode = BRIDGE_MODE_VEPA;
6254 else
6255 bp->br_mode = BRIDGE_MODE_UNDEF;
cf6645f8 6256
7eb9bb3a
MC
6257 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6258 if (!bp->max_mtu)
6259 bp->max_mtu = BNXT_MAX_MTU;
6260
cf6645f8
MC
6261func_qcfg_exit:
6262 mutex_unlock(&bp->hwrm_cmd_lock);
6263 return rc;
6264}
6265
98f04cf0
MC
6266static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6267{
6268 struct hwrm_func_backing_store_qcaps_input req = {0};
6269 struct hwrm_func_backing_store_qcaps_output *resp =
6270 bp->hwrm_cmd_resp_addr;
6271 int rc;
6272
6273 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6274 return 0;
6275
6276 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6277 mutex_lock(&bp->hwrm_cmd_lock);
6278 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6279 if (!rc) {
6280 struct bnxt_ctx_pg_info *ctx_pg;
6281 struct bnxt_ctx_mem_info *ctx;
6282 int i;
6283
6284 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6285 if (!ctx) {
6286 rc = -ENOMEM;
6287 goto ctx_err;
6288 }
6289 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
6290 if (!ctx_pg) {
6291 kfree(ctx);
6292 rc = -ENOMEM;
6293 goto ctx_err;
6294 }
6295 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6296 ctx->tqm_mem[i] = ctx_pg;
6297
6298 bp->ctx = ctx;
6299 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6300 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6301 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6302 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6303 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6304 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6305 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6306 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6307 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6308 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6309 ctx->vnic_max_vnic_entries =
6310 le16_to_cpu(resp->vnic_max_vnic_entries);
6311 ctx->vnic_max_ring_table_entries =
6312 le16_to_cpu(resp->vnic_max_ring_table_entries);
6313 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6314 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6315 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6316 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6317 ctx->tqm_min_entries_per_ring =
6318 le32_to_cpu(resp->tqm_min_entries_per_ring);
6319 ctx->tqm_max_entries_per_ring =
6320 le32_to_cpu(resp->tqm_max_entries_per_ring);
6321 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6322 if (!ctx->tqm_entries_multiple)
6323 ctx->tqm_entries_multiple = 1;
6324 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6325 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
53579e37
DS
6326 ctx->mrav_num_entries_units =
6327 le16_to_cpu(resp->mrav_num_entries_units);
98f04cf0
MC
6328 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6329 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6330 } else {
6331 rc = 0;
6332 }
6333ctx_err:
6334 mutex_unlock(&bp->hwrm_cmd_lock);
6335 return rc;
6336}
6337
1b9394e5
MC
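/* Encode the backing store page size (upper nibble of pg_attr) and
 * page table depth (low bits: 0, 1 or 2 levels) and point the request
 * at either the page table directory or the single data page.
 */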
6338static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6339 __le64 *pg_dir)
6340{
6341 u8 pg_size = 0;
6342
6343 if (BNXT_PAGE_SHIFT == 13)
6344 pg_size = 1 << 4;
 6345	else if (BNXT_PAGE_SHIFT == 16)
6346 pg_size = 2 << 4;
6347
6348 *pg_attr = pg_size;
08fe9d18
MC
6349 if (rmem->depth >= 1) {
6350 if (rmem->depth == 2)
6351 *pg_attr |= 2;
6352 else
6353 *pg_attr |= 1;
1b9394e5
MC
6354 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6355 } else {
6356 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6357 }
6358}
6359
6360#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6361 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6362 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6363 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6364 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6365 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6366
6367static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6368{
6369 struct hwrm_func_backing_store_cfg_input req = {0};
6370 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6371 struct bnxt_ctx_pg_info *ctx_pg;
6372 __le32 *num_entries;
6373 __le64 *pg_dir;
53579e37 6374 u32 flags = 0;
1b9394e5
MC
6375 u8 *pg_attr;
6376 int i, rc;
6377 u32 ena;
6378
6379 if (!ctx)
6380 return 0;
6381
6382 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6383 req.enables = cpu_to_le32(enables);
6384
6385 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6386 ctx_pg = &ctx->qp_mem;
6387 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6388 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6389 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6390 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6391 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6392 &req.qpc_pg_size_qpc_lvl,
6393 &req.qpc_page_dir);
6394 }
6395 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6396 ctx_pg = &ctx->srq_mem;
6397 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6398 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6399 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6400 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6401 &req.srq_pg_size_srq_lvl,
6402 &req.srq_page_dir);
6403 }
6404 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6405 ctx_pg = &ctx->cq_mem;
6406 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6407 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6408 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6409 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6410 &req.cq_page_dir);
6411 }
6412 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6413 ctx_pg = &ctx->vnic_mem;
6414 req.vnic_num_vnic_entries =
6415 cpu_to_le16(ctx->vnic_max_vnic_entries);
6416 req.vnic_num_ring_table_entries =
6417 cpu_to_le16(ctx->vnic_max_ring_table_entries);
6418 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6419 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6420 &req.vnic_pg_size_vnic_lvl,
6421 &req.vnic_page_dir);
6422 }
6423 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6424 ctx_pg = &ctx->stat_mem;
6425 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6426 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6427 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6428 &req.stat_pg_size_stat_lvl,
6429 &req.stat_page_dir);
6430 }
cf6daed0
MC
6431 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6432 ctx_pg = &ctx->mrav_mem;
6433 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
53579e37
DS
6434 if (ctx->mrav_num_entries_units)
6435 flags |=
6436 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
cf6daed0
MC
6437 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6438 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6439 &req.mrav_pg_size_mrav_lvl,
6440 &req.mrav_page_dir);
6441 }
6442 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6443 ctx_pg = &ctx->tim_mem;
6444 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6445 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6446 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6447 &req.tim_pg_size_tim_lvl,
6448 &req.tim_page_dir);
6449 }
1b9394e5
MC
6450 for (i = 0, num_entries = &req.tqm_sp_num_entries,
6451 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6452 pg_dir = &req.tqm_sp_page_dir,
6453 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6454 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6455 if (!(enables & ena))
6456 continue;
6457
6458 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6459 ctx_pg = ctx->tqm_mem[i];
6460 *num_entries = cpu_to_le32(ctx_pg->entries);
6461 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6462 }
53579e37 6463 req.flags = cpu_to_le32(flags);
1b9394e5
MC
6464 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6465 if (rc)
6466 rc = -EIO;
6467 return rc;
6468}
6469
98f04cf0 6470static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
08fe9d18 6471 struct bnxt_ctx_pg_info *ctx_pg)
98f04cf0
MC
6472{
6473 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6474
98f04cf0
MC
6475 rmem->page_size = BNXT_PAGE_SIZE;
6476 rmem->pg_arr = ctx_pg->ctx_pg_arr;
6477 rmem->dma_arr = ctx_pg->ctx_dma_arr;
1b9394e5 6478 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
08fe9d18
MC
6479 if (rmem->depth >= 1)
6480 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
98f04cf0
MC
6481 return bnxt_alloc_ring(bp, rmem);
6482}
6483
08fe9d18
MC
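/* Allocate a backing store region of mem_size bytes.  Small regions
 * use a single level of pages; regions larger than MAX_CTX_PAGES
 * pages, or callers asking for depth > 1, get a two-level page table
 * with up to MAX_CTX_PAGES entries per table.
 */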
6484static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6485 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6486 u8 depth)
6487{
6488 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6489 int rc;
6490
6491 if (!mem_size)
6492 return 0;
6493
6494 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6495 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6496 ctx_pg->nr_pages = 0;
6497 return -EINVAL;
6498 }
6499 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6500 int nr_tbls, i;
6501
6502 rmem->depth = 2;
6503 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6504 GFP_KERNEL);
6505 if (!ctx_pg->ctx_pg_tbl)
6506 return -ENOMEM;
6507 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6508 rmem->nr_pages = nr_tbls;
6509 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6510 if (rc)
6511 return rc;
6512 for (i = 0; i < nr_tbls; i++) {
6513 struct bnxt_ctx_pg_info *pg_tbl;
6514
6515 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6516 if (!pg_tbl)
6517 return -ENOMEM;
6518 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6519 rmem = &pg_tbl->ring_mem;
6520 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6521 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6522 rmem->depth = 1;
6523 rmem->nr_pages = MAX_CTX_PAGES;
6ef982de
MC
6524 if (i == (nr_tbls - 1)) {
6525 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6526
6527 if (rem)
6528 rmem->nr_pages = rem;
6529 }
08fe9d18
MC
6530 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6531 if (rc)
6532 break;
6533 }
6534 } else {
6535 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6536 if (rmem->nr_pages > 1 || depth)
6537 rmem->depth = 1;
6538 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6539 }
6540 return rc;
6541}
6542
6543static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6544 struct bnxt_ctx_pg_info *ctx_pg)
6545{
6546 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6547
6548 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6549 ctx_pg->ctx_pg_tbl) {
6550 int i, nr_tbls = rmem->nr_pages;
6551
6552 for (i = 0; i < nr_tbls; i++) {
6553 struct bnxt_ctx_pg_info *pg_tbl;
6554 struct bnxt_ring_mem_info *rmem2;
6555
6556 pg_tbl = ctx_pg->ctx_pg_tbl[i];
6557 if (!pg_tbl)
6558 continue;
6559 rmem2 = &pg_tbl->ring_mem;
6560 bnxt_free_ring(bp, rmem2);
6561 ctx_pg->ctx_pg_arr[i] = NULL;
6562 kfree(pg_tbl);
6563 ctx_pg->ctx_pg_tbl[i] = NULL;
6564 }
6565 kfree(ctx_pg->ctx_pg_tbl);
6566 ctx_pg->ctx_pg_tbl = NULL;
6567 }
6568 bnxt_free_ring(bp, rmem);
6569 ctx_pg->nr_pages = 0;
6570}
6571
98f04cf0
MC
6572static void bnxt_free_ctx_mem(struct bnxt *bp)
6573{
6574 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6575 int i;
6576
6577 if (!ctx)
6578 return;
6579
6580 if (ctx->tqm_mem[0]) {
6581 for (i = 0; i < bp->max_q + 1; i++)
08fe9d18 6582 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
98f04cf0
MC
6583 kfree(ctx->tqm_mem[0]);
6584 ctx->tqm_mem[0] = NULL;
6585 }
6586
cf6daed0
MC
6587 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6588 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
08fe9d18
MC
6589 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6590 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6591 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6592 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6593 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
98f04cf0
MC
6594 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6595}
6596
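/* Size and allocate all firmware backing store regions (QP, SRQ, CQ,
 * VNIC, stats, TQM and, when RoCE is supported, MR/AV and TIM), then
 * hand the layout to firmware via HWRM_FUNC_BACKING_STORE_CFG.
 * Extra QP/SRQ entries are added for RoCE except on kdump kernels.
 */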
6597static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6598{
6599 struct bnxt_ctx_pg_info *ctx_pg;
6600 struct bnxt_ctx_mem_info *ctx;
1b9394e5 6601 u32 mem_size, ena, entries;
53579e37 6602 u32 num_mr, num_ah;
cf6daed0
MC
6603 u32 extra_srqs = 0;
6604 u32 extra_qps = 0;
6605 u8 pg_lvl = 1;
98f04cf0
MC
6606 int i, rc;
6607
6608 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6609 if (rc) {
6610 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6611 rc);
6612 return rc;
6613 }
6614 ctx = bp->ctx;
6615 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6616 return 0;
6617
d629522e 6618 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
cf6daed0
MC
6619 pg_lvl = 2;
6620 extra_qps = 65536;
6621 extra_srqs = 8192;
6622 }
6623
98f04cf0 6624 ctx_pg = &ctx->qp_mem;
cf6daed0
MC
6625 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6626 extra_qps;
98f04cf0 6627 mem_size = ctx->qp_entry_size * ctx_pg->entries;
cf6daed0 6628 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
98f04cf0
MC
6629 if (rc)
6630 return rc;
6631
6632 ctx_pg = &ctx->srq_mem;
cf6daed0 6633 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
98f04cf0 6634 mem_size = ctx->srq_entry_size * ctx_pg->entries;
cf6daed0 6635 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
98f04cf0
MC
6636 if (rc)
6637 return rc;
6638
6639 ctx_pg = &ctx->cq_mem;
cf6daed0 6640 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
98f04cf0 6641 mem_size = ctx->cq_entry_size * ctx_pg->entries;
cf6daed0 6642 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
98f04cf0
MC
6643 if (rc)
6644 return rc;
6645
6646 ctx_pg = &ctx->vnic_mem;
6647 ctx_pg->entries = ctx->vnic_max_vnic_entries +
6648 ctx->vnic_max_ring_table_entries;
6649 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
08fe9d18 6650 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
98f04cf0
MC
6651 if (rc)
6652 return rc;
6653
6654 ctx_pg = &ctx->stat_mem;
6655 ctx_pg->entries = ctx->stat_max_entries;
6656 mem_size = ctx->stat_entry_size * ctx_pg->entries;
08fe9d18 6657 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
98f04cf0
MC
6658 if (rc)
6659 return rc;
6660
cf6daed0
MC
6661 ena = 0;
6662 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6663 goto skip_rdma;
6664
6665 ctx_pg = &ctx->mrav_mem;
53579e37
DS
6666 /* 128K extra is needed to accommodate static AH context
6667 * allocation by f/w.
6668 */
6669 num_mr = 1024 * 256;
6670 num_ah = 1024 * 128;
6671 ctx_pg->entries = num_mr + num_ah;
cf6daed0
MC
6672 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6673 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
6674 if (rc)
6675 return rc;
6676 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
53579e37
DS
6677 if (ctx->mrav_num_entries_units)
6678 ctx_pg->entries =
6679 ((num_mr / ctx->mrav_num_entries_units) << 16) |
6680 (num_ah / ctx->mrav_num_entries_units);
cf6daed0
MC
6681
6682 ctx_pg = &ctx->tim_mem;
6683 ctx_pg->entries = ctx->qp_mem.entries;
6684 mem_size = ctx->tim_entry_size * ctx_pg->entries;
6685 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6686 if (rc)
6687 return rc;
6688 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6689
6690skip_rdma:
6691 entries = ctx->qp_max_l2_entries + extra_qps;
98f04cf0
MC
6692 entries = roundup(entries, ctx->tqm_entries_multiple);
6693 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6694 ctx->tqm_max_entries_per_ring);
cf6daed0 6695 for (i = 0; i < bp->max_q + 1; i++) {
98f04cf0
MC
6696 ctx_pg = ctx->tqm_mem[i];
6697 ctx_pg->entries = entries;
6698 mem_size = ctx->tqm_entry_size * entries;
08fe9d18 6699 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
98f04cf0
MC
6700 if (rc)
6701 return rc;
1b9394e5 6702 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
98f04cf0 6703 }
1b9394e5
MC
6704 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6705 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6706 if (rc)
6707 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6708 rc);
6709 else
6710 ctx->flags |= BNXT_CTX_FLAG_INITED;
6711
98f04cf0
MC
6712 return 0;
6713}
6714
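/* Query min/max resource limits (rings, ring groups, L2/RSS contexts,
 * VNICs, statistics contexts) with HWRM_FUNC_RESOURCE_QCAPS.  When
 * 'all' is false only max_tx_sch_inputs is refreshed.
 */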
db4723b3 6715int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
be0dd9c4
MC
6716{
6717 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6718 struct hwrm_func_resource_qcaps_input req = {0};
6719 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6720 int rc;
6721
6722 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6723 req.fid = cpu_to_le16(0xffff);
6724
6725 mutex_lock(&bp->hwrm_cmd_lock);
351cbde9
JT
6726 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6727 HWRM_CMD_TIMEOUT);
be0dd9c4
MC
6728 if (rc) {
6729 rc = -EIO;
6730 goto hwrm_func_resc_qcaps_exit;
6731 }
6732
db4723b3
MC
6733 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6734 if (!all)
6735 goto hwrm_func_resc_qcaps_exit;
6736
be0dd9c4
MC
6737 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6738 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6739 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6740 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6741 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6742 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6743 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6744 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6745 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6746 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6747 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6748 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6749 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6750 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6751 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6752 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6753
9c1fabdf
MC
6754 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6755 u16 max_msix = le16_to_cpu(resp->max_msix);
6756
f7588cd8 6757 hw_resc->max_nqs = max_msix;
9c1fabdf
MC
6758 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6759 }
6760
4673d664
MC
6761 if (BNXT_PF(bp)) {
6762 struct bnxt_pf_info *pf = &bp->pf;
6763
6764 pf->vf_resv_strategy =
6765 le16_to_cpu(resp->vf_reservation_strategy);
bf82736d 6766 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
4673d664
MC
6767 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6768 }
be0dd9c4
MC
6769hwrm_func_resc_qcaps_exit:
6770 mutex_unlock(&bp->hwrm_cmd_lock);
6771 return rc;
6772}
6773
6774static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5
MC
6775{
6776 int rc = 0;
6777 struct hwrm_func_qcaps_input req = {0};
6778 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6a4f2947
MC
6779 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6780 u32 flags;
c0c050c5
MC
6781
6782 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6783 req.fid = cpu_to_le16(0xffff);
6784
6785 mutex_lock(&bp->hwrm_cmd_lock);
6786 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6787 if (rc)
6788 goto hwrm_func_qcaps_exit;
6789
6a4f2947
MC
6790 flags = le32_to_cpu(resp->flags);
6791 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
e4060d30 6792 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6a4f2947 6793 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
e4060d30 6794 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
55e4398d
VV
6795 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
6796 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
6154532f
VV
6797 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
6798 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
e4060d30 6799
7cc5a20e 6800 bp->tx_push_thresh = 0;
6a4f2947 6801 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
7cc5a20e
MC
6802 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6803
6a4f2947
MC
6804 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6805 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6806 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6807 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6808 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6809 if (!hw_resc->max_hw_ring_grps)
6810 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6811 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6812 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6813 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6814
c0c050c5
MC
6815 if (BNXT_PF(bp)) {
6816 struct bnxt_pf_info *pf = &bp->pf;
6817
6818 pf->fw_fid = le16_to_cpu(resp->fid);
6819 pf->port_id = le16_to_cpu(resp->port_id);
87027db1 6820 bp->dev->dev_port = pf->port_id;
11f15ed3 6821 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
c0c050c5
MC
6822 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
6823 pf->max_vfs = le16_to_cpu(resp->max_vfs);
6824 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
6825 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
6826 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
6827 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
6828 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
6829 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
6a4f2947 6830 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
c1ef146a 6831 bp->flags |= BNXT_FLAG_WOL_CAP;
c0c050c5 6832 } else {
379a80a1 6833#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
6834 struct bnxt_vf_info *vf = &bp->vf;
6835
6836 vf->fw_fid = le16_to_cpu(resp->fid);
7cc5a20e 6837 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
379a80a1 6838#endif
c0c050c5
MC
6839 }
6840
c0c050c5
MC
6841hwrm_func_qcaps_exit:
6842 mutex_unlock(&bp->hwrm_cmd_lock);
6843 return rc;
6844}
6845
804fba4e
MC
6846static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
6847
be0dd9c4
MC
6848static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
6849{
6850 int rc;
6851
6852 rc = __bnxt_hwrm_func_qcaps(bp);
6853 if (rc)
6854 return rc;
804fba4e
MC
6855 rc = bnxt_hwrm_queue_qportcfg(bp);
6856 if (rc) {
6857 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
6858 return rc;
6859 }
be0dd9c4 6860 if (bp->hwrm_spec_code >= 0x10803) {
98f04cf0
MC
6861 rc = bnxt_alloc_ctx_mem(bp);
6862 if (rc)
6863 return rc;
db4723b3 6864 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
be0dd9c4 6865 if (!rc)
97381a18 6866 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
be0dd9c4
MC
6867 }
6868 return 0;
6869}
6870
e969ae5b
MC
6871static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
6872{
6873 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
6874 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
6875 int rc = 0;
6876 u32 flags;
6877
6878 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
6879 return 0;
6880
6881 resp = bp->hwrm_cmd_resp_addr;
6882 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
6883
6884 mutex_lock(&bp->hwrm_cmd_lock);
6885 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6886 if (rc)
6887 goto hwrm_cfa_adv_qcaps_exit;
6888
6889 flags = le32_to_cpu(resp->flags);
6890 if (flags &
6891 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED)
6892 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX;
6893
6894hwrm_cfa_adv_qcaps_exit:
6895 mutex_unlock(&bp->hwrm_cmd_lock);
6896 return rc;
6897}
6898
c0c050c5
MC
6899static int bnxt_hwrm_func_reset(struct bnxt *bp)
6900{
6901 struct hwrm_func_reset_input req = {0};
6902
6903 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
6904 req.enables = 0;
6905
6906 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
6907}
6908
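/* Query the firmware queue/port configuration: record each queue id and
 * profile in bp->q_ids[] / bp->q_info[], and derive bp->max_q, bp->max_tc
 * and bp->max_lltc from the response.
 */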
6909static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
6910{
6911 int rc = 0;
6912 struct hwrm_queue_qportcfg_input req = {0};
6913 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
aabfc016
MC
6914 u8 i, j, *qptr;
6915 bool no_rdma;
c0c050c5
MC
6916
6917 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
6918
6919 mutex_lock(&bp->hwrm_cmd_lock);
6920 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6921 if (rc)
6922 goto qportcfg_exit;
6923
6924 if (!resp->max_configurable_queues) {
6925 rc = -EINVAL;
6926 goto qportcfg_exit;
6927 }
6928 bp->max_tc = resp->max_configurable_queues;
87c374de 6929 bp->max_lltc = resp->max_configurable_lossless_queues;
c0c050c5
MC
6930 if (bp->max_tc > BNXT_MAX_QUEUE)
6931 bp->max_tc = BNXT_MAX_QUEUE;
6932
aabfc016
MC
6933 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
6934 qptr = &resp->queue_id0;
6935 for (i = 0, j = 0; i < bp->max_tc; i++) {
98f04cf0
MC
6936 bp->q_info[j].queue_id = *qptr;
6937 bp->q_ids[i] = *qptr++;
aabfc016
MC
6938 bp->q_info[j].queue_profile = *qptr++;
6939 bp->tc_to_qidx[j] = j;
6940 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
6941 (no_rdma && BNXT_PF(bp)))
6942 j++;
6943 }
98f04cf0 6944 bp->max_q = bp->max_tc;
aabfc016
MC
6945 bp->max_tc = max_t(u8, j, 1);
6946
441cabbb
MC
6947 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
6948 bp->max_tc = 1;
6949
87c374de
MC
6950 if (bp->max_lltc > bp->max_tc)
6951 bp->max_lltc = bp->max_tc;
6952
c0c050c5
MC
6953qportcfg_exit:
6954 mutex_unlock(&bp->hwrm_cmd_lock);
6955 return rc;
6956}
6957
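/* Query the HWRM/firmware version.  Records the interface spec level and
 * firmware version string, the command timeout and maximum request lengths,
 * the chip number, and any optional firmware capabilities advertised in
 * dev_caps_cfg (short commands, Kong channel, 64-bit flow handles, trusted
 * VF, advanced flow management).
 */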
6958static int bnxt_hwrm_ver_get(struct bnxt *bp)
6959{
6960 int rc;
6961 struct hwrm_ver_get_input req = {0};
6962 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
e605db80 6963 u32 dev_caps_cfg;
c0c050c5 6964
e6ef2699 6965 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
c0c050c5
MC
6966 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
6967 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
6968 req.hwrm_intf_min = HWRM_VERSION_MINOR;
6969 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
6970 mutex_lock(&bp->hwrm_cmd_lock);
6971 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6972 if (rc)
6973 goto hwrm_ver_get_exit;
6974
6975 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
6976
894aa69a
MC
6977 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
6978 resp->hwrm_intf_min_8b << 8 |
6979 resp->hwrm_intf_upd_8b;
6980 if (resp->hwrm_intf_maj_8b < 1) {
c193554e 6981 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
894aa69a
MC
6982 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
6983 resp->hwrm_intf_upd_8b);
c193554e 6984 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5 6985 }
431aa1eb 6986 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
894aa69a
MC
6987 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
6988 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
c0c050c5 6989
691aa620
VV
6990 if (strlen(resp->active_pkg_name)) {
6991 int fw_ver_len = strlen(bp->fw_ver_str);
6992
6993 snprintf(bp->fw_ver_str + fw_ver_len,
6994 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
6995 resp->active_pkg_name);
6996 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
6997 }
6998
ff4fe81d
MC
6999 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7000 if (!bp->hwrm_cmd_timeout)
7001 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7002
1dfddc41 7003 if (resp->hwrm_intf_maj_8b >= 1) {
e6ef2699 7004 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
1dfddc41
MC
7005 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7006 }
7007 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7008 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
e6ef2699 7009
659c805c 7010 bp->chip_num = le16_to_cpu(resp->chip_num);
3e8060fa
PS
7011 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7012 !resp->chip_metal)
7013 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
659c805c 7014
e605db80
DK
7015 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7016 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7017 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
97381a18 7018 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
e605db80 7019
760b6d33
VD
7020 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7021 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7022
abd43a13
VD
7023 if (dev_caps_cfg &
7024 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7025 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7026
2a516444
MC
7027 if (dev_caps_cfg &
7028 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7029 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7030
e969ae5b
MC
7031 if (dev_caps_cfg &
7032 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7033 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7034
c0c050c5
MC
7035hwrm_ver_get_exit:
7036 mutex_unlock(&bp->hwrm_cmd_lock);
7037 return rc;
7038}
7039
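/* Push the current host wall-clock time to the firmware.  Returns
 * -EOPNOTSUPP when the firmware spec level is too old, or when a VF is
 * not yet allowed to set the time.
 */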
5ac67d8b
RS
7040int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7041{
7042 struct hwrm_fw_set_time_input req = {0};
7dfaa7bc
AB
7043 struct tm tm;
7044 time64_t now = ktime_get_real_seconds();
5ac67d8b 7045
ca2c39e2
MC
7046 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7047 bp->hwrm_spec_code < 0x10400)
5ac67d8b
RS
7048 return -EOPNOTSUPP;
7049
7dfaa7bc 7050 time64_to_tm(now, 0, &tm);
5ac67d8b
RS
7051 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7052 req.year = cpu_to_le16(1900 + tm.tm_year);
7053 req.month = 1 + tm.tm_mon;
7054 req.day = tm.tm_mday;
7055 req.hour = tm.tm_hour;
7056 req.minute = tm.tm_min;
7057 req.second = tm.tm_sec;
7058 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7059}
7060
3bdf56c4
MC
7061static int bnxt_hwrm_port_qstats(struct bnxt *bp)
7062{
7063 int rc;
7064 struct bnxt_pf_info *pf = &bp->pf;
7065 struct hwrm_port_qstats_input req = {0};
7066
7067 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7068 return 0;
7069
7070 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7071 req.port_id = cpu_to_le16(pf->port_id);
7072 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
7073 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
7074 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7075 return rc;
7076}
7077
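/* Collect the extended RX/TX port statistics into the host DMA buffers.
 * When the firmware returns the newer TX stats layout, also query the
 * priority-to-CoS queue mapping so per-priority counters can be reported.
 */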
00db3cba
VV
7078static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
7079{
36e53349 7080 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
e37fed79 7081 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
00db3cba
VV
7082 struct hwrm_port_qstats_ext_input req = {0};
7083 struct bnxt_pf_info *pf = &bp->pf;
ad361adf 7084 u32 tx_stat_size;
36e53349 7085 int rc;
00db3cba
VV
7086
7087 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7088 return 0;
7089
7090 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7091 req.port_id = cpu_to_le16(pf->port_id);
7092 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7093 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
ad361adf
MC
7094 tx_stat_size = bp->hw_tx_port_stats_ext ?
7095 sizeof(*bp->hw_tx_port_stats_ext) : 0;
7096 req.tx_stat_size = cpu_to_le16(tx_stat_size);
36e53349
MC
7097 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
7098 mutex_lock(&bp->hwrm_cmd_lock);
7099 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7100 if (!rc) {
7101 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
ad361adf
MC
7102 bp->fw_tx_stats_ext_size = tx_stat_size ?
7103 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
36e53349
MC
7104 } else {
7105 bp->fw_rx_stats_ext_size = 0;
7106 bp->fw_tx_stats_ext_size = 0;
7107 }
e37fed79
MC
7108 if (bp->fw_tx_stats_ext_size <=
7109 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7110 mutex_unlock(&bp->hwrm_cmd_lock);
7111 bp->pri2cos_valid = 0;
7112 return rc;
7113 }
7114
7115 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
7116 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
7117
7118 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
7119 if (!rc) {
7120 struct hwrm_queue_pri2cos_qcfg_output *resp2;
7121 u8 *pri2cos;
7122 int i, j;
7123
7124 resp2 = bp->hwrm_cmd_resp_addr;
7125 pri2cos = &resp2->pri0_cos_queue_id;
7126 for (i = 0; i < 8; i++) {
7127 u8 queue_id = pri2cos[i];
7128
7129 for (j = 0; j < bp->max_q; j++) {
7130 if (bp->q_ids[j] == queue_id)
7131 bp->pri2cos[i] = j;
7132 }
7133 }
7134 bp->pri2cos_valid = 1;
7135 }
36e53349
MC
7136 mutex_unlock(&bp->hwrm_cmd_lock);
7137 return rc;
00db3cba
VV
7138}
7139
55e4398d
VV
7140static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
7141{
7142 struct hwrm_pcie_qstats_input req = {0};
7143
7144 if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
7145 return 0;
7146
7147 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
7148 req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
7149 req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
7150 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7151}
7152
c0c050c5
MC
7153static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
7154{
7155 if (bp->vxlan_port_cnt) {
7156 bnxt_hwrm_tunnel_dst_port_free(
7157 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7158 }
7159 bp->vxlan_port_cnt = 0;
7160 if (bp->nge_port_cnt) {
7161 bnxt_hwrm_tunnel_dst_port_free(
7162 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7163 }
7164 bp->nge_port_cnt = 0;
7165}
7166
7167static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
7168{
7169 int rc, i;
7170 u32 tpa_flags = 0;
7171
7172 if (set_tpa)
7173 tpa_flags = bp->flags & BNXT_FLAG_TPA;
7174 for (i = 0; i < bp->nr_vnics; i++) {
7175 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
7176 if (rc) {
7177 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
23e12c89 7178 i, rc);
c0c050c5
MC
7179 return rc;
7180 }
7181 }
7182 return 0;
7183}
7184
7185static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7186{
7187 int i;
7188
7189 for (i = 0; i < bp->nr_vnics; i++)
7190 bnxt_hwrm_vnic_set_rss(bp, i, false);
7191}
7192
7193static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7194 bool irq_re_init)
7195{
7196 if (bp->vnic_info) {
7197 bnxt_hwrm_clear_vnic_filter(bp);
7198		/* clear all RSS settings before freeing the vnic ctx */
7199 bnxt_hwrm_clear_vnic_rss(bp);
7200 bnxt_hwrm_vnic_ctx_free(bp);
7201		/* before freeing the vnic, undo the vnic tpa settings */
7202 if (bp->flags & BNXT_FLAG_TPA)
7203 bnxt_set_tpa(bp, false);
7204 bnxt_hwrm_vnic_free(bp);
7205 }
7206 bnxt_hwrm_ring_free(bp, close_path);
7207 bnxt_hwrm_ring_grp_free(bp);
7208 if (irq_re_init) {
7209 bnxt_hwrm_stat_ctx_free(bp);
7210 bnxt_hwrm_free_tunnel_ports(bp);
7211 }
7212}
7213
39d8ba2e
MC
7214static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
7215{
7216 struct hwrm_func_cfg_input req = {0};
7217 int rc;
7218
7219 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7220 req.fid = cpu_to_le16(0xffff);
7221 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
7222 if (br_mode == BRIDGE_MODE_VEB)
7223 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
7224 else if (br_mode == BRIDGE_MODE_VEPA)
7225 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
7226 else
7227 return -EINVAL;
7228 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7229 if (rc)
7230 rc = -EIO;
7231 return rc;
7232}
7233
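/* Configure the device cache line size (64 or 128 bytes) via HWRM_FUNC_CFG.
 * Skipped on VFs and on firmware older than HWRM spec 0x10803.
 */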
c3480a60
MC
7234static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
7235{
7236 struct hwrm_func_cfg_input req = {0};
7237 int rc;
7238
7239 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
7240 return 0;
7241
7242 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7243 req.fid = cpu_to_le16(0xffff);
7244 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
d4f52de0 7245 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
c3480a60 7246 if (size == 128)
d4f52de0 7247 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
c3480a60
MC
7248
7249 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7250 if (rc)
7251 rc = -EIO;
7252 return rc;
7253}
7254
7b3af4f7 7255static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
c0c050c5 7256{
ae10ae74 7257 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
c0c050c5
MC
7258 int rc;
7259
ae10ae74
MC
7260 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
7261 goto skip_rss_ctx;
7262
c0c050c5 7263 /* allocate context for vnic */
94ce9caa 7264 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
c0c050c5
MC
7265 if (rc) {
7266 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7267 vnic_id, rc);
7268 goto vnic_setup_err;
7269 }
7270 bp->rsscos_nr_ctxs++;
7271
94ce9caa
PS
7272 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7273 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
7274 if (rc) {
7275 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
7276 vnic_id, rc);
7277 goto vnic_setup_err;
7278 }
7279 bp->rsscos_nr_ctxs++;
7280 }
7281
ae10ae74 7282skip_rss_ctx:
c0c050c5
MC
7283 /* configure default vnic, ring grp */
7284 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7285 if (rc) {
7286 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7287 vnic_id, rc);
7288 goto vnic_setup_err;
7289 }
7290
7291 /* Enable RSS hashing on vnic */
7292 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
7293 if (rc) {
7294 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
7295 vnic_id, rc);
7296 goto vnic_setup_err;
7297 }
7298
7299 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7300 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7301 if (rc) {
7302 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7303 vnic_id, rc);
7304 }
7305 }
7306
7307vnic_setup_err:
7308 return rc;
7309}
7310
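/* VNIC setup for P5 chips: one RSS context is needed per 64 RX rings, so
 * allocate them all before configuring RSS, the VNIC itself and HDS.
 */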
7b3af4f7
MC
7311static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
7312{
7313 int rc, i, nr_ctxs;
7314
7315 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
7316 for (i = 0; i < nr_ctxs; i++) {
7317 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
7318 if (rc) {
7319 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
7320 vnic_id, i, rc);
7321 break;
7322 }
7323 bp->rsscos_nr_ctxs++;
7324 }
7325 if (i < nr_ctxs)
7326 return -ENOMEM;
7327
7328 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
7329 if (rc) {
7330 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
7331 vnic_id, rc);
7332 return rc;
7333 }
7334 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7335 if (rc) {
7336 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7337 vnic_id, rc);
7338 return rc;
7339 }
7340 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7341 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7342 if (rc) {
7343 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7344 vnic_id, rc);
7345 }
7346 }
7347 return rc;
7348}
7349
7350static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7351{
7352 if (bp->flags & BNXT_FLAG_CHIP_P5)
7353 return __bnxt_setup_vnic_p5(bp, vnic_id);
7354 else
7355 return __bnxt_setup_vnic(bp, vnic_id);
7356}
7357
c0c050c5
MC
7358static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
7359{
7360#ifdef CONFIG_RFS_ACCEL
7361 int i, rc = 0;
7362
9b3d15e6
MC
7363 if (bp->flags & BNXT_FLAG_CHIP_P5)
7364 return 0;
7365
c0c050c5 7366 for (i = 0; i < bp->rx_nr_rings; i++) {
ae10ae74 7367 struct bnxt_vnic_info *vnic;
c0c050c5
MC
7368 u16 vnic_id = i + 1;
7369 u16 ring_id = i;
7370
7371 if (vnic_id >= bp->nr_vnics)
7372 break;
7373
ae10ae74
MC
7374 vnic = &bp->vnic_info[vnic_id];
7375 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7376 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7377 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
b81a90d3 7378 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
c0c050c5
MC
7379 if (rc) {
7380 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7381 vnic_id, rc);
7382 break;
7383 }
7384 rc = bnxt_setup_vnic(bp, vnic_id);
7385 if (rc)
7386 break;
7387 }
7388 return rc;
7389#else
7390 return 0;
7391#endif
7392}
7393
17c71ac3
MC
7394/* Allow PF and VF with default VLAN to be in promiscuous mode */
7395static bool bnxt_promisc_ok(struct bnxt *bp)
7396{
7397#ifdef CONFIG_BNXT_SRIOV
7398 if (BNXT_VF(bp) && !bp->vf.vlan)
7399 return false;
7400#endif
7401 return true;
7402}
7403
dc52c6c7
PS
7404static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7405{
7406 unsigned int rc = 0;
7407
7408 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7409 if (rc) {
7410 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7411 rc);
7412 return rc;
7413 }
7414
7415 rc = bnxt_hwrm_vnic_cfg(bp, 1);
7416 if (rc) {
7417 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7418 rc);
7419 return rc;
7420 }
7421 return rc;
7422}
7423
b664f008 7424static int bnxt_cfg_rx_mode(struct bnxt *);
7d2837dd 7425static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
b664f008 7426
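/* Bring up the device data path: allocate stat contexts, HW rings and ring
 * groups, set up the default VNIC (plus RFS VNICs and TPA when enabled),
 * program the unicast filter and RX mask, and apply coalescing settings.
 */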
c0c050c5
MC
7427static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7428{
7d2837dd 7429 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
c0c050c5 7430 int rc = 0;
76595193 7431 unsigned int rx_nr_rings = bp->rx_nr_rings;
c0c050c5
MC
7432
7433 if (irq_re_init) {
7434 rc = bnxt_hwrm_stat_ctx_alloc(bp);
7435 if (rc) {
7436 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7437 rc);
7438 goto err_out;
7439 }
7440 }
7441
7442 rc = bnxt_hwrm_ring_alloc(bp);
7443 if (rc) {
7444 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7445 goto err_out;
7446 }
7447
7448 rc = bnxt_hwrm_ring_grp_alloc(bp);
7449 if (rc) {
7450 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7451 goto err_out;
7452 }
7453
76595193
PS
7454 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7455 rx_nr_rings--;
7456
c0c050c5 7457 /* default vnic 0 */
76595193 7458 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
c0c050c5
MC
7459 if (rc) {
7460 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7461 goto err_out;
7462 }
7463
7464 rc = bnxt_setup_vnic(bp, 0);
7465 if (rc)
7466 goto err_out;
7467
7468 if (bp->flags & BNXT_FLAG_RFS) {
7469 rc = bnxt_alloc_rfs_vnics(bp);
7470 if (rc)
7471 goto err_out;
7472 }
7473
7474 if (bp->flags & BNXT_FLAG_TPA) {
7475 rc = bnxt_set_tpa(bp, true);
7476 if (rc)
7477 goto err_out;
7478 }
7479
7480 if (BNXT_VF(bp))
7481 bnxt_update_vf_mac(bp);
7482
7483 /* Filter for default vnic 0 */
7484 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7485 if (rc) {
7486 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7487 goto err_out;
7488 }
7d2837dd 7489 vnic->uc_filter_count = 1;
c0c050c5 7490
30e33848
MC
7491 vnic->rx_mask = 0;
7492 if (bp->dev->flags & IFF_BROADCAST)
7493 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5 7494
17c71ac3 7495 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7d2837dd
MC
7496 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7497
7498 if (bp->dev->flags & IFF_ALLMULTI) {
7499 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7500 vnic->mc_list_count = 0;
7501 } else {
7502 u32 mask = 0;
7503
7504 bnxt_mc_list_updated(bp, &mask);
7505 vnic->rx_mask |= mask;
7506 }
c0c050c5 7507
b664f008
MC
7508 rc = bnxt_cfg_rx_mode(bp);
7509 if (rc)
c0c050c5 7510 goto err_out;
c0c050c5
MC
7511
7512 rc = bnxt_hwrm_set_coal(bp);
7513 if (rc)
7514 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
dc52c6c7
PS
7515 rc);
7516
7517 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7518 rc = bnxt_setup_nitroa0_vnic(bp);
7519 if (rc)
7520 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7521 rc);
7522 }
c0c050c5 7523
cf6645f8
MC
7524 if (BNXT_VF(bp)) {
7525 bnxt_hwrm_func_qcfg(bp);
7526 netdev_update_features(bp->dev);
7527 }
7528
c0c050c5
MC
7529 return 0;
7530
7531err_out:
7532 bnxt_hwrm_resource_free(bp, 0, true);
7533
7534 return rc;
7535}
7536
7537static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7538{
7539 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7540 return 0;
7541}
7542
7543static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7544{
2247925f 7545 bnxt_init_cp_rings(bp);
c0c050c5
MC
7546 bnxt_init_rx_rings(bp);
7547 bnxt_init_tx_rings(bp);
7548 bnxt_init_ring_grps(bp, irq_re_init);
7549 bnxt_init_vnics(bp);
7550
7551 return bnxt_init_chip(bp, irq_re_init);
7552}
7553
c0c050c5
MC
7554static int bnxt_set_real_num_queues(struct bnxt *bp)
7555{
7556 int rc;
7557 struct net_device *dev = bp->dev;
7558
5f449249
MC
7559 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7560 bp->tx_nr_rings_xdp);
c0c050c5
MC
7561 if (rc)
7562 return rc;
7563
7564 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7565 if (rc)
7566 return rc;
7567
7568#ifdef CONFIG_RFS_ACCEL
45019a18 7569 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 7570 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
7571#endif
7572
7573 return rc;
7574}
7575
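/* Fit *rx and *tx within @max.  With shared rings each count is simply
 * capped at @max; otherwise rx + tx is reduced, largest side first, until
 * the sum fits.  For example, rx = 8, tx = 8, max = 12 trims to
 * rx = 6, tx = 6.
 */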
6e6c5a57
MC
7576static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7577 bool shared)
7578{
7579 int _rx = *rx, _tx = *tx;
7580
7581 if (shared) {
7582 *rx = min_t(int, _rx, max);
7583 *tx = min_t(int, _tx, max);
7584 } else {
7585 if (max < 2)
7586 return -ENOMEM;
7587
7588 while (_rx + _tx > max) {
7589 if (_rx > _tx && _rx > 1)
7590 _rx--;
7591 else if (_tx > 1)
7592 _tx--;
7593 }
7594 *rx = _rx;
7595 *tx = _tx;
7596 }
7597 return 0;
7598}
7599
7809592d
MC
7600static void bnxt_setup_msix(struct bnxt *bp)
7601{
7602 const int len = sizeof(bp->irq_tbl[0].name);
7603 struct net_device *dev = bp->dev;
7604 int tcs, i;
7605
7606 tcs = netdev_get_num_tc(dev);
7607 if (tcs > 1) {
d1e7925e 7608 int i, off, count;
7809592d 7609
d1e7925e
MC
7610 for (i = 0; i < tcs; i++) {
7611 count = bp->tx_nr_rings_per_tc;
7612 off = i * count;
7613 netdev_set_tc_queue(dev, i, count, off);
7809592d
MC
7614 }
7615 }
7616
7617 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c 7618 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7809592d
MC
7619 char *attr;
7620
7621 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7622 attr = "TxRx";
7623 else if (i < bp->rx_nr_rings)
7624 attr = "rx";
7625 else
7626 attr = "tx";
7627
e5811b8c
MC
7628 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7629 attr, i);
7630 bp->irq_tbl[map_idx].handler = bnxt_msix;
7809592d
MC
7631 }
7632}
7633
7634static void bnxt_setup_inta(struct bnxt *bp)
7635{
7636 const int len = sizeof(bp->irq_tbl[0].name);
7637
7638 if (netdev_get_num_tc(bp->dev))
7639 netdev_reset_tc(bp->dev);
7640
7641 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7642 0);
7643 bp->irq_tbl[0].handler = bnxt_inta;
7644}
7645
7646static int bnxt_setup_int_mode(struct bnxt *bp)
7647{
7648 int rc;
7649
7650 if (bp->flags & BNXT_FLAG_USING_MSIX)
7651 bnxt_setup_msix(bp);
7652 else
7653 bnxt_setup_inta(bp);
7654
7655 rc = bnxt_set_real_num_queues(bp);
7656 return rc;
7657}
7658
b7429954 7659#ifdef CONFIG_RFS_ACCEL
8079e8f1
MC
7660static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7661{
6a4f2947 7662 return bp->hw_resc.max_rsscos_ctxs;
8079e8f1
MC
7663}
7664
7665static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7666{
6a4f2947 7667 return bp->hw_resc.max_vnics;
8079e8f1 7668}
b7429954 7669#endif
8079e8f1 7670
e4060d30
MC
7671unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7672{
6a4f2947 7673 return bp->hw_resc.max_stat_ctxs;
e4060d30
MC
7674}
7675
7676unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7677{
6a4f2947 7678 return bp->hw_resc.max_cp_rings;
e4060d30
MC
7679}
7680
e916b081 7681static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
a588e458 7682{
c0b8cda0
MC
7683 unsigned int cp = bp->hw_resc.max_cp_rings;
7684
7685 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7686 cp -= bnxt_get_ulp_msix_num(bp);
7687
7688 return cp;
a588e458
MC
7689}
7690
ad95c27b 7691static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7809592d 7692{
6a4f2947
MC
7693 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7694
f7588cd8
MC
7695 if (bp->flags & BNXT_FLAG_CHIP_P5)
7696 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7697
6a4f2947 7698 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7809592d
MC
7699}
7700
30f52947 7701static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
33c2657e 7702{
6a4f2947 7703 bp->hw_resc.max_irqs = max_irqs;
33c2657e
MC
7704}
7705
e916b081
MC
7706unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7707{
7708 unsigned int cp;
7709
7710 cp = bnxt_get_max_func_cp_rings_for_en(bp);
7711 if (bp->flags & BNXT_FLAG_CHIP_P5)
7712 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
7713 else
7714 return cp - bp->cp_nr_rings;
7715}
7716
c027c6b4
VV
7717unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
7718{
d77b1ad8 7719 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
c027c6b4
VV
7720}
7721
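/* Return the number of additional MSI-X vectors that can be made available
 * when @num more are requested, beyond those already used by the completion
 * rings.  The result is bounded by the vectors already allocated and, with
 * the new resource manager, by the function's IRQ limit.
 */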
fbcfc8e4
MC
7722int bnxt_get_avail_msix(struct bnxt *bp, int num)
7723{
7724 int max_cp = bnxt_get_max_func_cp_rings(bp);
7725 int max_irq = bnxt_get_max_func_irqs(bp);
7726 int total_req = bp->cp_nr_rings + num;
7727 int max_idx, avail_msix;
7728
75720e63
MC
7729 max_idx = bp->total_irqs;
7730 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7731 max_idx = min_t(int, bp->total_irqs, max_cp);
fbcfc8e4 7732 avail_msix = max_idx - bp->cp_nr_rings;
f1ca94de 7733 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
fbcfc8e4
MC
7734 return avail_msix;
7735
7736 if (max_irq < total_req) {
7737 num = max_irq - bp->cp_nr_rings;
7738 if (num <= 0)
7739 return 0;
7740 }
7741 return num;
7742}
7743
08654eb2
MC
7744static int bnxt_get_num_msix(struct bnxt *bp)
7745{
f1ca94de 7746 if (!BNXT_NEW_RM(bp))
08654eb2
MC
7747 return bnxt_get_max_func_irqs(bp);
7748
c0b8cda0 7749 return bnxt_nq_rings_in_use(bp);
08654eb2
MC
7750}
7751
7809592d 7752static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 7753{
fbcfc8e4 7754 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7809592d 7755 struct msix_entry *msix_ent;
c0c050c5 7756
08654eb2
MC
7757 total_vecs = bnxt_get_num_msix(bp);
7758 max = bnxt_get_max_func_irqs(bp);
7759 if (total_vecs > max)
7760 total_vecs = max;
7761
2773dfb2
MC
7762 if (!total_vecs)
7763 return 0;
7764
c0c050c5
MC
7765 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
7766 if (!msix_ent)
7767 return -ENOMEM;
7768
7769 for (i = 0; i < total_vecs; i++) {
7770 msix_ent[i].entry = i;
7771 msix_ent[i].vector = 0;
7772 }
7773
01657bcd
MC
7774 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
7775 min = 2;
7776
7777 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
fbcfc8e4
MC
7778 ulp_msix = bnxt_get_ulp_msix_num(bp);
7779 if (total_vecs < 0 || total_vecs < ulp_msix) {
c0c050c5
MC
7780 rc = -ENODEV;
7781 goto msix_setup_exit;
7782 }
7783
7784 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
7785 if (bp->irq_tbl) {
7809592d
MC
7786 for (i = 0; i < total_vecs; i++)
7787 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 7788
7809592d 7789 bp->total_irqs = total_vecs;
c0c050c5 7790		/* Trim rings based on the number of vectors allocated */
6e6c5a57 7791 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
fbcfc8e4 7792 total_vecs - ulp_msix, min == 1);
6e6c5a57
MC
7793 if (rc)
7794 goto msix_setup_exit;
7795
7809592d
MC
7796 bp->cp_nr_rings = (min == 1) ?
7797 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7798 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 7799
c0c050c5
MC
7800 } else {
7801 rc = -ENOMEM;
7802 goto msix_setup_exit;
7803 }
7804 bp->flags |= BNXT_FLAG_USING_MSIX;
7805 kfree(msix_ent);
7806 return 0;
7807
7808msix_setup_exit:
7809592d
MC
7809 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
7810 kfree(bp->irq_tbl);
7811 bp->irq_tbl = NULL;
c0c050c5
MC
7812 pci_disable_msix(bp->pdev);
7813 kfree(msix_ent);
7814 return rc;
7815}
7816
7809592d 7817static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 7818{
c0c050c5 7819 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
7820 if (!bp->irq_tbl)
7821 return -ENOMEM;
7822
7823 bp->total_irqs = 1;
c0c050c5
MC
7824 bp->rx_nr_rings = 1;
7825 bp->tx_nr_rings = 1;
7826 bp->cp_nr_rings = 1;
01657bcd 7827 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 7828 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 7829 return 0;
c0c050c5
MC
7830}
7831
7809592d 7832static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5
MC
7833{
7834 int rc = 0;
7835
7836 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 7837 rc = bnxt_init_msix(bp);
c0c050c5 7838
1fa72e29 7839 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 7840 /* fallback to INTA */
7809592d 7841 rc = bnxt_init_inta(bp);
c0c050c5
MC
7842 }
7843 return rc;
7844}
7845
7809592d
MC
7846static void bnxt_clear_int_mode(struct bnxt *bp)
7847{
7848 if (bp->flags & BNXT_FLAG_USING_MSIX)
7849 pci_disable_msix(bp->pdev);
7850
7851 kfree(bp->irq_tbl);
7852 bp->irq_tbl = NULL;
7853 bp->flags &= ~BNXT_FLAG_USING_MSIX;
7854}
7855
1b3f0b75 7856int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
674f50a5 7857{
674f50a5 7858 int tcs = netdev_get_num_tc(bp->dev);
1b3f0b75 7859 bool irq_cleared = false;
674f50a5
MC
7860 int rc;
7861
7862 if (!bnxt_need_reserve_rings(bp))
7863 return 0;
7864
1b3f0b75
MC
7865 if (irq_re_init && BNXT_NEW_RM(bp) &&
7866 bnxt_get_num_msix(bp) != bp->total_irqs) {
ec86f14e 7867 bnxt_ulp_irq_stop(bp);
674f50a5 7868 bnxt_clear_int_mode(bp);
1b3f0b75 7869 irq_cleared = true;
36d65be9
MC
7870 }
7871 rc = __bnxt_reserve_rings(bp);
1b3f0b75 7872 if (irq_cleared) {
36d65be9
MC
7873 if (!rc)
7874 rc = bnxt_init_int_mode(bp);
ec86f14e 7875 bnxt_ulp_irq_restart(bp, rc);
36d65be9
MC
7876 }
7877 if (rc) {
7878 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
7879 return rc;
674f50a5
MC
7880 }
7881 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
7882 netdev_err(bp->dev, "tx ring reservation failure\n");
7883 netdev_reset_tc(bp->dev);
7884 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
7885 return -ENOMEM;
7886 }
674f50a5
MC
7887 return 0;
7888}
7889
c0c050c5
MC
7890static void bnxt_free_irq(struct bnxt *bp)
7891{
7892 struct bnxt_irq *irq;
7893 int i;
7894
7895#ifdef CONFIG_RFS_ACCEL
7896 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
7897 bp->dev->rx_cpu_rmap = NULL;
7898#endif
cb98526b 7899 if (!bp->irq_tbl || !bp->bnapi)
c0c050c5
MC
7900 return;
7901
7902 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
7903 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7904
7905 irq = &bp->irq_tbl[map_idx];
56f0fd80
VV
7906 if (irq->requested) {
7907 if (irq->have_cpumask) {
7908 irq_set_affinity_hint(irq->vector, NULL);
7909 free_cpumask_var(irq->cpu_mask);
7910 irq->have_cpumask = 0;
7911 }
c0c050c5 7912 free_irq(irq->vector, bp->bnapi[i]);
56f0fd80
VV
7913 }
7914
c0c050c5
MC
7915 irq->requested = 0;
7916 }
c0c050c5
MC
7917}
7918
7919static int bnxt_request_irq(struct bnxt *bp)
7920{
b81a90d3 7921 int i, j, rc = 0;
c0c050c5
MC
7922 unsigned long flags = 0;
7923#ifdef CONFIG_RFS_ACCEL
e5811b8c 7924 struct cpu_rmap *rmap;
c0c050c5
MC
7925#endif
7926
e5811b8c
MC
7927 rc = bnxt_setup_int_mode(bp);
7928 if (rc) {
7929 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
7930 rc);
7931 return rc;
7932 }
7933#ifdef CONFIG_RFS_ACCEL
7934 rmap = bp->dev->rx_cpu_rmap;
7935#endif
c0c050c5
MC
7936 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
7937 flags = IRQF_SHARED;
7938
b81a90d3 7939 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
7940 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7941 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
7942
c0c050c5 7943#ifdef CONFIG_RFS_ACCEL
b81a90d3 7944 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
7945 rc = irq_cpu_rmap_add(rmap, irq->vector);
7946 if (rc)
7947 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
b81a90d3
MC
7948 j);
7949 j++;
c0c050c5
MC
7950 }
7951#endif
7952 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
7953 bp->bnapi[i]);
7954 if (rc)
7955 break;
7956
7957 irq->requested = 1;
56f0fd80
VV
7958
7959 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
7960 int numa_node = dev_to_node(&bp->pdev->dev);
7961
7962 irq->have_cpumask = 1;
7963 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
7964 irq->cpu_mask);
7965 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
7966 if (rc) {
7967 netdev_warn(bp->dev,
7968 "Set affinity failed, IRQ = %d\n",
7969 irq->vector);
7970 break;
7971 }
7972 }
c0c050c5
MC
7973 }
7974 return rc;
7975}
7976
7977static void bnxt_del_napi(struct bnxt *bp)
7978{
7979 int i;
7980
7981 if (!bp->bnapi)
7982 return;
7983
7984 for (i = 0; i < bp->cp_nr_rings; i++) {
7985 struct bnxt_napi *bnapi = bp->bnapi[i];
7986
7987 napi_hash_del(&bnapi->napi);
7988 netif_napi_del(&bnapi->napi);
7989 }
e5f6f564
ED
7990	/* We called napi_hash_del() before netif_napi_del(), so we need
7991 * to respect an RCU grace period before freeing napi structures.
7992 */
7993 synchronize_net();
c0c050c5
MC
7994}
7995
7996static void bnxt_init_napi(struct bnxt *bp)
7997{
7998 int i;
10bbdaf5 7999 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
8000 struct bnxt_napi *bnapi;
8001
8002 if (bp->flags & BNXT_FLAG_USING_MSIX) {
0fcec985
MC
8003 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8004
8005 if (bp->flags & BNXT_FLAG_CHIP_P5)
8006 poll_fn = bnxt_poll_p5;
8007 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10bbdaf5
PS
8008 cp_nr_rings--;
8009 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5 8010 bnapi = bp->bnapi[i];
0fcec985 8011 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
c0c050c5 8012 }
10bbdaf5
PS
8013 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8014 bnapi = bp->bnapi[cp_nr_rings];
8015 netif_napi_add(bp->dev, &bnapi->napi,
8016 bnxt_poll_nitroa0, 64);
10bbdaf5 8017 }
c0c050c5
MC
8018 } else {
8019 bnapi = bp->bnapi[0];
8020 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
c0c050c5
MC
8021 }
8022}
8023
8024static void bnxt_disable_napi(struct bnxt *bp)
8025{
8026 int i;
8027
8028 if (!bp->bnapi)
8029 return;
8030
0bc0b97f
AG
8031 for (i = 0; i < bp->cp_nr_rings; i++) {
8032 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8033
8034 if (bp->bnapi[i]->rx_ring)
8035 cancel_work_sync(&cpr->dim.work);
8036
c0c050c5 8037 napi_disable(&bp->bnapi[i]->napi);
0bc0b97f 8038 }
c0c050c5
MC
8039}
8040
8041static void bnxt_enable_napi(struct bnxt *bp)
8042{
8043 int i;
8044
8045 for (i = 0; i < bp->cp_nr_rings; i++) {
6a8788f2 8046 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
fa7e2812 8047 bp->bnapi[i]->in_reset = false;
6a8788f2
AG
8048
8049 if (bp->bnapi[i]->rx_ring) {
8050 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
c002bd52 8051 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
6a8788f2 8052 }
c0c050c5
MC
8053 napi_enable(&bp->bnapi[i]->napi);
8054 }
8055}
8056
7df4ae9f 8057void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
8058{
8059 int i;
c0c050c5 8060 struct bnxt_tx_ring_info *txr;
c0c050c5 8061
b6ab4b01 8062 if (bp->tx_ring) {
c0c050c5 8063 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 8064 txr = &bp->tx_ring[i];
c0c050c5 8065 txr->dev_state = BNXT_DEV_STATE_CLOSING;
c0c050c5
MC
8066 }
8067 }
8068 /* Stop all TX queues */
8069 netif_tx_disable(bp->dev);
8070 netif_carrier_off(bp->dev);
8071}
8072
7df4ae9f 8073void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
8074{
8075 int i;
c0c050c5 8076 struct bnxt_tx_ring_info *txr;
c0c050c5
MC
8077
8078 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 8079 txr = &bp->tx_ring[i];
c0c050c5
MC
8080 txr->dev_state = 0;
8081 }
8082 netif_tx_wake_all_queues(bp->dev);
8083 if (bp->link_info.link_up)
8084 netif_carrier_on(bp->dev);
8085}
8086
8087static void bnxt_report_link(struct bnxt *bp)
8088{
8089 if (bp->link_info.link_up) {
8090 const char *duplex;
8091 const char *flow_ctrl;
38a21b34
DK
8092 u32 speed;
8093 u16 fec;
c0c050c5
MC
8094
8095 netif_carrier_on(bp->dev);
8096 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
8097 duplex = "full";
8098 else
8099 duplex = "half";
8100 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
8101 flow_ctrl = "ON - receive & transmit";
8102 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
8103 flow_ctrl = "ON - transmit";
8104 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
8105 flow_ctrl = "ON - receive";
8106 else
8107 flow_ctrl = "none";
8108 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
38a21b34 8109 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
c0c050c5 8110 speed, duplex, flow_ctrl);
170ce013
MC
8111 if (bp->flags & BNXT_FLAG_EEE_CAP)
8112 netdev_info(bp->dev, "EEE is %s\n",
8113 bp->eee.eee_active ? "active" :
8114 "not active");
e70c752f
MC
8115 fec = bp->link_info.fec_cfg;
8116 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
8117 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
8118 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
8119 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
8120 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
c0c050c5
MC
8121 } else {
8122 netif_carrier_off(bp->dev);
8123 netdev_err(bp->dev, "NIC Link is Down\n");
8124 }
8125}
8126
170ce013
MC
8127static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
8128{
8129 int rc = 0;
8130 struct hwrm_port_phy_qcaps_input req = {0};
8131 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
93ed8117 8132 struct bnxt_link_info *link_info = &bp->link_info;
170ce013
MC
8133
8134 if (bp->hwrm_spec_code < 0x10201)
8135 return 0;
8136
8137 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
8138
8139 mutex_lock(&bp->hwrm_cmd_lock);
8140 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8141 if (rc)
8142 goto hwrm_phy_qcaps_exit;
8143
acb20054 8144 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
170ce013
MC
8145 struct ethtool_eee *eee = &bp->eee;
8146 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
8147
8148 bp->flags |= BNXT_FLAG_EEE_CAP;
8149 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8150 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
8151 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
8152 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
8153 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
8154 }
55fd0cf3
MC
8155 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
8156 if (bp->test_info)
8157 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
8158 }
520ad89a
MC
8159 if (resp->supported_speeds_auto_mode)
8160 link_info->support_auto_speeds =
8161 le16_to_cpu(resp->supported_speeds_auto_mode);
170ce013 8162
d5430d31
MC
8163 bp->port_count = resp->port_cnt;
8164
170ce013
MC
8165hwrm_phy_qcaps_exit:
8166 mutex_unlock(&bp->hwrm_cmd_lock);
8167 return rc;
8168}
8169
c0c050c5
MC
8170static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
8171{
8172 int rc = 0;
8173 struct bnxt_link_info *link_info = &bp->link_info;
8174 struct hwrm_port_phy_qcfg_input req = {0};
8175 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8176 u8 link_up = link_info->link_up;
286ef9d6 8177 u16 diff;
c0c050c5
MC
8178
8179 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
8180
8181 mutex_lock(&bp->hwrm_cmd_lock);
8182 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8183 if (rc) {
8184 mutex_unlock(&bp->hwrm_cmd_lock);
8185 return rc;
8186 }
8187
8188 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
8189 link_info->phy_link_status = resp->link;
acb20054
MC
8190 link_info->duplex = resp->duplex_cfg;
8191 if (bp->hwrm_spec_code >= 0x10800)
8192 link_info->duplex = resp->duplex_state;
c0c050c5
MC
8193 link_info->pause = resp->pause;
8194 link_info->auto_mode = resp->auto_mode;
8195 link_info->auto_pause_setting = resp->auto_pause;
3277360e 8196 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 8197 link_info->force_pause_setting = resp->force_pause;
acb20054 8198 link_info->duplex_setting = resp->duplex_cfg;
c0c050c5
MC
8199 if (link_info->phy_link_status == BNXT_LINK_LINK)
8200 link_info->link_speed = le16_to_cpu(resp->link_speed);
8201 else
8202 link_info->link_speed = 0;
8203 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
c0c050c5
MC
8204 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
8205 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
3277360e
MC
8206 link_info->lp_auto_link_speeds =
8207 le16_to_cpu(resp->link_partner_adv_speeds);
c0c050c5
MC
8208 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
8209 link_info->phy_ver[0] = resp->phy_maj;
8210 link_info->phy_ver[1] = resp->phy_min;
8211 link_info->phy_ver[2] = resp->phy_bld;
8212 link_info->media_type = resp->media_type;
03efbec0 8213 link_info->phy_type = resp->phy_type;
11f15ed3 8214 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
8215 link_info->phy_addr = resp->eee_config_phy_addr &
8216 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 8217 link_info->module_status = resp->module_status;
170ce013
MC
8218
8219 if (bp->flags & BNXT_FLAG_EEE_CAP) {
8220 struct ethtool_eee *eee = &bp->eee;
8221 u16 fw_speeds;
8222
8223 eee->eee_active = 0;
8224 if (resp->eee_config_phy_addr &
8225 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
8226 eee->eee_active = 1;
8227 fw_speeds = le16_to_cpu(
8228 resp->link_partner_adv_eee_link_speed_mask);
8229 eee->lp_advertised =
8230 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8231 }
8232
8233 /* Pull initial EEE config */
8234 if (!chng_link_state) {
8235 if (resp->eee_config_phy_addr &
8236 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
8237 eee->eee_enabled = 1;
c0c050c5 8238
170ce013
MC
8239 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
8240 eee->advertised =
8241 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8242
8243 if (resp->eee_config_phy_addr &
8244 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
8245 __le32 tmr;
8246
8247 eee->tx_lpi_enabled = 1;
8248 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
8249 eee->tx_lpi_timer = le32_to_cpu(tmr) &
8250 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
8251 }
8252 }
8253 }
e70c752f
MC
8254
8255 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8256 if (bp->hwrm_spec_code >= 0x10504)
8257 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8258
c0c050c5
MC
8259 /* TODO: need to add more logic to report VF link */
8260 if (chng_link_state) {
8261 if (link_info->phy_link_status == BNXT_LINK_LINK)
8262 link_info->link_up = 1;
8263 else
8264 link_info->link_up = 0;
8265 if (link_up != link_info->link_up)
8266 bnxt_report_link(bp);
8267 } else {
8268		/* always link down if not required to update link state */
8269 link_info->link_up = 0;
8270 }
8271 mutex_unlock(&bp->hwrm_cmd_lock);
286ef9d6 8272
dac04907
MC
8273 if (!BNXT_SINGLE_PF(bp))
8274 return 0;
8275
286ef9d6
MC
8276 diff = link_info->support_auto_speeds ^ link_info->advertising;
8277 if ((link_info->support_auto_speeds | diff) !=
8278 link_info->support_auto_speeds) {
8279 /* An advertised speed is no longer supported, so we need to
0eaa24b9
MC
8280 * update the advertisement settings. Caller holds RTNL
8281 * so we can modify link settings.
286ef9d6 8282 */
286ef9d6 8283 link_info->advertising = link_info->support_auto_speeds;
0eaa24b9 8284 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
286ef9d6 8285 bnxt_hwrm_set_link_setting(bp, true, false);
286ef9d6 8286 }
c0c050c5
MC
8287 return 0;
8288}
8289
10289bec
MC
8290static void bnxt_get_port_module_status(struct bnxt *bp)
8291{
8292 struct bnxt_link_info *link_info = &bp->link_info;
8293 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
8294 u8 module_status;
8295
8296 if (bnxt_update_link(bp, true))
8297 return;
8298
8299 module_status = link_info->module_status;
8300 switch (module_status) {
8301 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
8302 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
8303 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
8304 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
8305 bp->pf.port_id);
8306 if (bp->hwrm_spec_code >= 0x10201) {
8307 netdev_warn(bp->dev, "Module part number %s\n",
8308 resp->phy_vendor_partnumber);
8309 }
8310 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
8311 netdev_warn(bp->dev, "TX is disabled\n");
8312 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
8313 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
8314 }
8315}
8316
c0c050c5
MC
8317static void
8318bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
8319{
8320 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
8321 if (bp->hwrm_spec_code >= 0x10201)
8322 req->auto_pause =
8323 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
8324 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8325 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
8326 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 8327 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
8328 req->enables |=
8329 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8330 } else {
8331 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8332 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
8333 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8334 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
8335 req->enables |=
8336 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
8337 if (bp->hwrm_spec_code >= 0x10201) {
8338 req->auto_pause = req->force_pause;
8339 req->enables |= cpu_to_le32(
8340 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8341 }
c0c050c5
MC
8342 }
8343}
8344
8345static void bnxt_hwrm_set_link_common(struct bnxt *bp,
8346 struct hwrm_port_phy_cfg_input *req)
8347{
8348 u8 autoneg = bp->link_info.autoneg;
8349 u16 fw_link_speed = bp->link_info.req_link_speed;
68515a18 8350 u16 advertising = bp->link_info.advertising;
c0c050c5
MC
8351
8352 if (autoneg & BNXT_AUTONEG_SPEED) {
8353 req->auto_mode |=
11f15ed3 8354 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
c0c050c5
MC
8355
8356 req->enables |= cpu_to_le32(
8357 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
8358 req->auto_link_speed_mask = cpu_to_le16(advertising);
8359
8360 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
8361 req->flags |=
8362 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
8363 } else {
8364 req->force_link_speed = cpu_to_le16(fw_link_speed);
8365 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
8366 }
8367
c0c050c5
MC
8368 /* tell chimp that the setting takes effect immediately */
8369 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
8370}
8371
8372int bnxt_hwrm_set_pause(struct bnxt *bp)
8373{
8374 struct hwrm_port_phy_cfg_input req = {0};
8375 int rc;
8376
8377 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8378 bnxt_hwrm_set_pause_common(bp, &req);
8379
8380 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8381 bp->link_info.force_link_chng)
8382 bnxt_hwrm_set_link_common(bp, &req);
8383
8384 mutex_lock(&bp->hwrm_cmd_lock);
8385 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8386 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
8387		/* since changing the pause setting doesn't trigger any link
8388		 * change event, the driver needs to update the current pause
8389		 * result upon successful return of the phy_cfg command
8390 */
8391 bp->link_info.pause =
8392 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8393 bp->link_info.auto_pause_setting = 0;
8394 if (!bp->link_info.force_link_chng)
8395 bnxt_report_link(bp);
8396 }
8397 bp->link_info.force_link_chng = false;
8398 mutex_unlock(&bp->hwrm_cmd_lock);
8399 return rc;
8400}
8401
939f7f0c
MC
8402static void bnxt_hwrm_set_eee(struct bnxt *bp,
8403 struct hwrm_port_phy_cfg_input *req)
8404{
8405 struct ethtool_eee *eee = &bp->eee;
8406
8407 if (eee->eee_enabled) {
8408 u16 eee_speeds;
8409 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8410
8411 if (eee->tx_lpi_enabled)
8412 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8413 else
8414 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8415
8416 req->flags |= cpu_to_le32(flags);
8417 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8418 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8419 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8420 } else {
8421 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8422 }
8423}
8424
8425int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5
MC
8426{
8427 struct hwrm_port_phy_cfg_input req = {0};
8428
8429 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8430 if (set_pause)
8431 bnxt_hwrm_set_pause_common(bp, &req);
8432
8433 bnxt_hwrm_set_link_common(bp, &req);
939f7f0c
MC
8434
8435 if (set_eee)
8436 bnxt_hwrm_set_eee(bp, &req);
c0c050c5
MC
8437 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8438}
8439
33f7d55f
MC
8440static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8441{
8442 struct hwrm_port_phy_cfg_input req = {0};
8443
567b2abe 8444 if (!BNXT_SINGLE_PF(bp))
33f7d55f
MC
8445 return 0;
8446
8447 if (pci_num_vf(bp->pdev))
8448 return 0;
8449
8450 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
16d663a6 8451 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
33f7d55f
MC
8452 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8453}
8454
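/* Notify the firmware that the interface is going up or down.  If the
 * firmware reports that function resources changed while the interface was
 * down, re-query the resource caps and clear the cached reservations and
 * ring counts so they get reserved again.
 */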
25e1acd6
MC
8455static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8456{
8457 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8458 struct hwrm_func_drv_if_change_input req = {0};
8459 bool resc_reinit = false;
8460 int rc;
8461
8462 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8463 return 0;
8464
8465 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8466 if (up)
8467 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8468 mutex_lock(&bp->hwrm_cmd_lock);
8469 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8470 if (!rc && (resp->flags &
8471 cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
8472 resc_reinit = true;
8473 mutex_unlock(&bp->hwrm_cmd_lock);
8474
8475 if (up && resc_reinit && BNXT_NEW_RM(bp)) {
8476 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8477
8478 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8479 hw_resc->resv_cp_rings = 0;
780baad4 8480 hw_resc->resv_stat_ctxs = 0;
75720e63 8481 hw_resc->resv_irqs = 0;
25e1acd6
MC
8482 hw_resc->resv_tx_rings = 0;
8483 hw_resc->resv_rx_rings = 0;
8484 hw_resc->resv_hw_ring_grps = 0;
8485 hw_resc->resv_vnics = 0;
6b95c3e9
MC
8486 bp->tx_nr_rings = 0;
8487 bp->rx_nr_rings = 0;
25e1acd6
MC
8488 }
8489 return rc;
8490}
8491
5ad2cbee
MC
8492static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8493{
8494 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8495 struct hwrm_port_led_qcaps_input req = {0};
8496 struct bnxt_pf_info *pf = &bp->pf;
8497 int rc;
8498
8499 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8500 return 0;
8501
8502 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8503 req.port_id = cpu_to_le16(pf->port_id);
8504 mutex_lock(&bp->hwrm_cmd_lock);
8505 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8506 if (rc) {
8507 mutex_unlock(&bp->hwrm_cmd_lock);
8508 return rc;
8509 }
8510 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8511 int i;
8512
8513 bp->num_leds = resp->num_leds;
8514 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8515 bp->num_leds);
8516 for (i = 0; i < bp->num_leds; i++) {
8517 struct bnxt_led_info *led = &bp->leds[i];
8518 __le16 caps = led->led_state_caps;
8519
8520 if (!led->led_group_id ||
8521 !BNXT_LED_ALT_BLINK_CAP(caps)) {
8522 bp->num_leds = 0;
8523 break;
8524 }
8525 }
8526 }
8527 mutex_unlock(&bp->hwrm_cmd_lock);
8528 return 0;
8529}
8530
5282db6c
MC
8531int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8532{
8533 struct hwrm_wol_filter_alloc_input req = {0};
8534 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8535 int rc;
8536
8537 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8538 req.port_id = cpu_to_le16(bp->pf.port_id);
8539 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8540 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8541 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8542 mutex_lock(&bp->hwrm_cmd_lock);
8543 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8544 if (!rc)
8545 bp->wol_filter_id = resp->wol_filter_id;
8546 mutex_unlock(&bp->hwrm_cmd_lock);
8547 return rc;
8548}
8549
8550int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8551{
8552 struct hwrm_wol_filter_free_input req = {0};
8553 int rc;
8554
8555 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8556 req.port_id = cpu_to_le16(bp->pf.port_id);
8557 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8558 req.wol_filter_id = bp->wol_filter_id;
8559 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8560 return rc;
8561}
8562
c1ef146a
MC
8563static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8564{
8565 struct hwrm_wol_filter_qcfg_input req = {0};
8566 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8567 u16 next_handle = 0;
8568 int rc;
8569
8570 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8571 req.port_id = cpu_to_le16(bp->pf.port_id);
8572 req.handle = cpu_to_le16(handle);
8573 mutex_lock(&bp->hwrm_cmd_lock);
8574 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8575 if (!rc) {
8576 next_handle = le16_to_cpu(resp->next_handle);
8577 if (next_handle != 0) {
8578 if (resp->wol_type ==
8579 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8580 bp->wol = 1;
8581 bp->wol_filter_id = resp->wol_filter_id;
8582 }
8583 }
8584 }
8585 mutex_unlock(&bp->hwrm_cmd_lock);
8586 return next_handle;
8587}
8588
8589static void bnxt_get_wol_settings(struct bnxt *bp)
8590{
8591 u16 handle = 0;
8592
8593 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8594 return;
8595
8596 do {
8597 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8598 } while (handle && handle != 0xffff);
8599}
8600
cde49a42
VV
8601#ifdef CONFIG_BNXT_HWMON
8602static ssize_t bnxt_show_temp(struct device *dev,
8603 struct device_attribute *devattr, char *buf)
8604{
8605 struct hwrm_temp_monitor_query_input req = {0};
8606 struct hwrm_temp_monitor_query_output *resp;
8607 struct bnxt *bp = dev_get_drvdata(dev);
8608 u32 temp = 0;
8609
8610 resp = bp->hwrm_cmd_resp_addr;
8611 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8612 mutex_lock(&bp->hwrm_cmd_lock);
8613 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8614 temp = resp->temp * 1000; /* display millidegree */
8615 mutex_unlock(&bp->hwrm_cmd_lock);
8616
8617 return sprintf(buf, "%u\n", temp);
8618}
8619static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8620
8621static struct attribute *bnxt_attrs[] = {
8622 &sensor_dev_attr_temp1_input.dev_attr.attr,
8623 NULL
8624};
8625ATTRIBUTE_GROUPS(bnxt);
8626
8627static void bnxt_hwmon_close(struct bnxt *bp)
8628{
8629 if (bp->hwmon_dev) {
8630 hwmon_device_unregister(bp->hwmon_dev);
8631 bp->hwmon_dev = NULL;
8632 }
8633}
8634
8635static void bnxt_hwmon_open(struct bnxt *bp)
8636{
8637 struct pci_dev *pdev = bp->pdev;
8638
8639 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8640 DRV_MODULE_NAME, bp,
8641 bnxt_groups);
8642 if (IS_ERR(bp->hwmon_dev)) {
8643 bp->hwmon_dev = NULL;
8644 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8645 }
8646}
8647#else
8648static void bnxt_hwmon_close(struct bnxt *bp)
8649{
8650}
8651
8652static void bnxt_hwmon_open(struct bnxt *bp)
8653{
8654}
8655#endif
8656
939f7f0c
MC
8657static bool bnxt_eee_config_ok(struct bnxt *bp)
8658{
8659 struct ethtool_eee *eee = &bp->eee;
8660 struct bnxt_link_info *link_info = &bp->link_info;
8661
8662 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
8663 return true;
8664
8665 if (eee->eee_enabled) {
8666 u32 advertising =
8667 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
8668
8669 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8670 eee->eee_enabled = 0;
8671 return false;
8672 }
8673 if (eee->advertised & ~advertising) {
8674 eee->advertised = advertising & eee->supported;
8675 return false;
8676 }
8677 }
8678 return true;
8679}
8680
c0c050c5
MC
8681static int bnxt_update_phy_setting(struct bnxt *bp)
8682{
8683 int rc;
8684 bool update_link = false;
8685 bool update_pause = false;
939f7f0c 8686 bool update_eee = false;
c0c050c5
MC
8687 struct bnxt_link_info *link_info = &bp->link_info;
8688
8689 rc = bnxt_update_link(bp, true);
8690 if (rc) {
8691 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
8692 rc);
8693 return rc;
8694 }
33dac24a
MC
8695 if (!BNXT_SINGLE_PF(bp))
8696 return 0;
8697
c0c050c5 8698 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
c9ee9516
MC
8699 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
8700 link_info->req_flow_ctrl)
c0c050c5
MC
8701 update_pause = true;
8702 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8703 link_info->force_pause_setting != link_info->req_flow_ctrl)
8704 update_pause = true;
c0c050c5
MC
8705 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8706 if (BNXT_AUTO_MODE(link_info->auto_mode))
8707 update_link = true;
8708 if (link_info->req_link_speed != link_info->force_link_speed)
8709 update_link = true;
de73018f
MC
8710 if (link_info->req_duplex != link_info->duplex_setting)
8711 update_link = true;
c0c050c5
MC
8712 } else {
8713 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
8714 update_link = true;
8715 if (link_info->advertising != link_info->auto_link_speeds)
8716 update_link = true;
c0c050c5
MC
8717 }
8718
16d663a6
MC
 8719	/* The last close may have shut down the link, so we need to call
8720 * PHY_CFG to bring it back up.
8721 */
8722 if (!netif_carrier_ok(bp->dev))
8723 update_link = true;
8724
939f7f0c
MC
8725 if (!bnxt_eee_config_ok(bp))
8726 update_eee = true;
8727
c0c050c5 8728 if (update_link)
939f7f0c 8729 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
c0c050c5
MC
8730 else if (update_pause)
8731 rc = bnxt_hwrm_set_pause(bp);
8732 if (rc) {
8733 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
8734 rc);
8735 return rc;
8736 }
8737
8738 return rc;
8739}
8740
11809490
JH
 8741/* Common routine to pre-map certain register blocks to different GRC windows.
 8742 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
 8743 * in the PF and 3 windows in the VF can be customized to map in different
 8744 * register blocks.
8745 */
8746static void bnxt_preset_reg_win(struct bnxt *bp)
8747{
8748 if (BNXT_PF(bp)) {
8749 /* CAG registers map to GRC window #4 */
8750 writel(BNXT_CAG_REG_BASE,
8751 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
8752 }
8753}
8754
47558acd
MC
8755static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
8756
c0c050c5
MC
8757static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8758{
8759 int rc = 0;
8760
11809490 8761 bnxt_preset_reg_win(bp);
c0c050c5
MC
8762 netif_carrier_off(bp->dev);
8763 if (irq_re_init) {
47558acd
MC
8764 /* Reserve rings now if none were reserved at driver probe. */
8765 rc = bnxt_init_dflt_ring_mode(bp);
8766 if (rc) {
8767 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
8768 return rc;
8769 }
c0c050c5 8770 }
1b3f0b75 8771 rc = bnxt_reserve_rings(bp, irq_re_init);
41e8d798
MC
8772 if (rc)
8773 return rc;
c0c050c5
MC
8774 if ((bp->flags & BNXT_FLAG_RFS) &&
8775 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
8776 /* disable RFS if falling back to INTA */
8777 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
8778 bp->flags &= ~BNXT_FLAG_RFS;
8779 }
8780
8781 rc = bnxt_alloc_mem(bp, irq_re_init);
8782 if (rc) {
8783 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8784 goto open_err_free_mem;
8785 }
8786
8787 if (irq_re_init) {
8788 bnxt_init_napi(bp);
8789 rc = bnxt_request_irq(bp);
8790 if (rc) {
8791 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
c58387ab 8792 goto open_err_irq;
c0c050c5
MC
8793 }
8794 }
8795
8796 bnxt_enable_napi(bp);
cabfb09d 8797 bnxt_debug_dev_init(bp);
c0c050c5
MC
8798
8799 rc = bnxt_init_nic(bp, irq_re_init);
8800 if (rc) {
8801 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8802 goto open_err;
8803 }
8804
8805 if (link_re_init) {
e2dc9b6e 8806 mutex_lock(&bp->link_lock);
c0c050c5 8807 rc = bnxt_update_phy_setting(bp);
e2dc9b6e 8808 mutex_unlock(&bp->link_lock);
a1ef4a79 8809 if (rc) {
ba41d46f 8810 netdev_warn(bp->dev, "failed to update phy settings\n");
a1ef4a79
MC
8811 if (BNXT_SINGLE_PF(bp)) {
8812 bp->link_info.phy_retry = true;
8813 bp->link_info.phy_retry_expires =
8814 jiffies + 5 * HZ;
8815 }
8816 }
c0c050c5
MC
8817 }
8818
7cdd5fc3 8819 if (irq_re_init)
ad51b8e9 8820 udp_tunnel_get_rx_info(bp->dev);
c0c050c5 8821
caefe526 8822 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
8823 bnxt_enable_int(bp);
8824 /* Enable TX queues */
8825 bnxt_tx_enable(bp);
8826 mod_timer(&bp->timer, jiffies + bp->current_interval);
10289bec
MC
8827 /* Poll link status and check for SFP+ module status */
8828 bnxt_get_port_module_status(bp);
c0c050c5 8829
ee5c7fb3
SP
8830 /* VF-reps may need to be re-opened after the PF is re-opened */
8831 if (BNXT_PF(bp))
8832 bnxt_vf_reps_open(bp);
c0c050c5
MC
8833 return 0;
8834
8835open_err:
cabfb09d 8836 bnxt_debug_dev_exit(bp);
c0c050c5 8837 bnxt_disable_napi(bp);
c58387ab
VG
8838
8839open_err_irq:
c0c050c5
MC
8840 bnxt_del_napi(bp);
8841
8842open_err_free_mem:
8843 bnxt_free_skbs(bp);
8844 bnxt_free_irq(bp);
8845 bnxt_free_mem(bp, true);
8846 return rc;
8847}
8848
8849/* rtnl_lock held */
8850int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8851{
8852 int rc = 0;
8853
8854 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
8855 if (rc) {
8856 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
8857 dev_close(bp->dev);
8858 }
8859 return rc;
8860}
8861
f7dc1ea6
MC
 8862/* rtnl_lock held, open the NIC halfway by allocating all resources, but
 8863 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
 8864 * self-tests.
8865 */
8866int bnxt_half_open_nic(struct bnxt *bp)
8867{
8868 int rc = 0;
8869
8870 rc = bnxt_alloc_mem(bp, false);
8871 if (rc) {
8872 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8873 goto half_open_err;
8874 }
8875 rc = bnxt_init_nic(bp, false);
8876 if (rc) {
8877 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8878 goto half_open_err;
8879 }
8880 return 0;
8881
8882half_open_err:
8883 bnxt_free_skbs(bp);
8884 bnxt_free_mem(bp, false);
8885 dev_close(bp->dev);
8886 return rc;
8887}
8888
8889/* rtnl_lock held, this call can only be made after a previous successful
8890 * call to bnxt_half_open_nic().
8891 */
8892void bnxt_half_close_nic(struct bnxt *bp)
8893{
8894 bnxt_hwrm_resource_free(bp, false, false);
8895 bnxt_free_skbs(bp);
8896 bnxt_free_mem(bp, false);
8897}
8898
c0c050c5
MC
8899static int bnxt_open(struct net_device *dev)
8900{
8901 struct bnxt *bp = netdev_priv(dev);
25e1acd6 8902 int rc;
c0c050c5 8903
25e1acd6
MC
8904 bnxt_hwrm_if_change(bp, true);
8905 rc = __bnxt_open_nic(bp, true, true);
8906 if (rc)
8907 bnxt_hwrm_if_change(bp, false);
cde49a42
VV
8908
8909 bnxt_hwmon_open(bp);
8910
25e1acd6 8911 return rc;
c0c050c5
MC
8912}
8913
f9b76ebd
MC
8914static bool bnxt_drv_busy(struct bnxt *bp)
8915{
8916 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
8917 test_bit(BNXT_STATE_READ_STATS, &bp->state));
8918}
8919
b8875ca3
MC
8920static void bnxt_get_ring_stats(struct bnxt *bp,
8921 struct rtnl_link_stats64 *stats);
8922
86e953db
MC
8923static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
8924 bool link_re_init)
c0c050c5 8925{
ee5c7fb3
SP
8926 /* Close the VF-reps before closing PF */
8927 if (BNXT_PF(bp))
8928 bnxt_vf_reps_close(bp);
86e953db 8929
c0c050c5
MC
 8930	/* Change device state to avoid TX queue wake-ups */
8931 bnxt_tx_disable(bp);
8932
caefe526 8933 clear_bit(BNXT_STATE_OPEN, &bp->state);
4cebdcec 8934 smp_mb__after_atomic();
f9b76ebd 8935 while (bnxt_drv_busy(bp))
4cebdcec 8936 msleep(20);
c0c050c5 8937
9d8bc097 8938	/* Flush rings and disable interrupts */
c0c050c5
MC
8939 bnxt_shutdown_nic(bp, irq_re_init);
8940
8941 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
8942
cabfb09d 8943 bnxt_debug_dev_exit(bp);
c0c050c5 8944 bnxt_disable_napi(bp);
c0c050c5
MC
8945 del_timer_sync(&bp->timer);
8946 bnxt_free_skbs(bp);
8947
b8875ca3
MC
8948 /* Save ring stats before shutdown */
8949 if (bp->bnapi)
8950 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
c0c050c5
MC
8951 if (irq_re_init) {
8952 bnxt_free_irq(bp);
8953 bnxt_del_napi(bp);
8954 }
8955 bnxt_free_mem(bp, irq_re_init);
86e953db
MC
8956}
8957
8958int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8959{
8960 int rc = 0;
8961
8962#ifdef CONFIG_BNXT_SRIOV
8963 if (bp->sriov_cfg) {
8964 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
8965 !bp->sriov_cfg,
8966 BNXT_SRIOV_CFG_WAIT_TMO);
8967 if (rc)
8968 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
8969 }
8970#endif
8971 __bnxt_close_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
8972 return rc;
8973}
8974
8975static int bnxt_close(struct net_device *dev)
8976{
8977 struct bnxt *bp = netdev_priv(dev);
8978
cde49a42 8979 bnxt_hwmon_close(bp);
c0c050c5 8980 bnxt_close_nic(bp, true, true);
33f7d55f 8981 bnxt_hwrm_shutdown_link(bp);
25e1acd6 8982 bnxt_hwrm_if_change(bp, false);
c0c050c5
MC
8983 return 0;
8984}
8985
0ca12be9
VV
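/* Read a PHY register through the firmware MDIO interface.  Clause 45
 * addressing is used when phy_addr encodes a device address.
 */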
8986static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
8987 u16 *val)
8988{
8989 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
8990 struct hwrm_port_phy_mdio_read_input req = {0};
8991 int rc;
8992
8993 if (bp->hwrm_spec_code < 0x10a00)
8994 return -EOPNOTSUPP;
8995
8996 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
8997 req.port_id = cpu_to_le16(bp->pf.port_id);
8998 req.phy_addr = phy_addr;
8999 req.reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 9000 if (mdio_phy_id_is_c45(phy_addr)) {
0ca12be9
VV
9001 req.cl45_mdio = 1;
9002 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9003 req.dev_addr = mdio_phy_id_devad(phy_addr);
9004 req.reg_addr = cpu_to_le16(reg);
9005 }
9006
9007 mutex_lock(&bp->hwrm_cmd_lock);
9008 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9009 if (!rc)
9010 *val = le16_to_cpu(resp->reg_data);
9011 mutex_unlock(&bp->hwrm_cmd_lock);
9012 return rc;
9013}
9014
9015static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
9016 u16 val)
9017{
9018 struct hwrm_port_phy_mdio_write_input req = {0};
9019
9020 if (bp->hwrm_spec_code < 0x10a00)
9021 return -EOPNOTSUPP;
9022
9023 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
9024 req.port_id = cpu_to_le16(bp->pf.port_id);
9025 req.phy_addr = phy_addr;
9026 req.reg_addr = cpu_to_le16(reg & 0x1f);
2730214d 9027 if (mdio_phy_id_is_c45(phy_addr)) {
0ca12be9
VV
9028 req.cl45_mdio = 1;
9029 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9030 req.dev_addr = mdio_phy_id_devad(phy_addr);
9031 req.reg_addr = cpu_to_le16(reg);
9032 }
9033 req.reg_data = cpu_to_le16(val);
9034
9035 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9036}
9037
c0c050c5
MC
9038/* rtnl_lock held */
9039static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9040{
0ca12be9
VV
9041 struct mii_ioctl_data *mdio = if_mii(ifr);
9042 struct bnxt *bp = netdev_priv(dev);
9043 int rc;
9044
c0c050c5
MC
9045 switch (cmd) {
9046 case SIOCGMIIPHY:
0ca12be9
VV
9047 mdio->phy_id = bp->link_info.phy_addr;
9048
c0c050c5
MC
9049 /* fallthru */
9050 case SIOCGMIIREG: {
0ca12be9
VV
9051 u16 mii_regval = 0;
9052
c0c050c5
MC
9053 if (!netif_running(dev))
9054 return -EAGAIN;
9055
0ca12be9
VV
9056 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
9057 &mii_regval);
9058 mdio->val_out = mii_regval;
9059 return rc;
c0c050c5
MC
9060 }
9061
9062 case SIOCSMIIREG:
9063 if (!netif_running(dev))
9064 return -EAGAIN;
9065
0ca12be9
VV
9066 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
9067 mdio->val_in);
c0c050c5
MC
9068
9069 default:
9070 /* do nothing */
9071 break;
9072 }
9073 return -EOPNOTSUPP;
9074}
9075
b8875ca3
MC
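/* Accumulate the per-completion-ring hardware counters into the
 * rtnl_link_stats64 structure reported to the stack.
 */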
9076static void bnxt_get_ring_stats(struct bnxt *bp,
9077 struct rtnl_link_stats64 *stats)
c0c050c5 9078{
b8875ca3 9079 int i;
c0c050c5 9080
c0c050c5 9081
c0c050c5
MC
9082 for (i = 0; i < bp->cp_nr_rings; i++) {
9083 struct bnxt_napi *bnapi = bp->bnapi[i];
9084 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9085 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
9086
9087 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
9088 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
9089 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
9090
9091 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
9092 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
9093 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
9094
9095 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
9096 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
9097 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
9098
9099 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
9100 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
9101 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
9102
9103 stats->rx_missed_errors +=
9104 le64_to_cpu(hw_stats->rx_discard_pkts);
9105
9106 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
9107
c0c050c5
MC
9108 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
9109 }
b8875ca3
MC
9110}
9111
9112static void bnxt_add_prev_stats(struct bnxt *bp,
9113 struct rtnl_link_stats64 *stats)
9114{
9115 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
9116
9117 stats->rx_packets += prev_stats->rx_packets;
9118 stats->tx_packets += prev_stats->tx_packets;
9119 stats->rx_bytes += prev_stats->rx_bytes;
9120 stats->tx_bytes += prev_stats->tx_bytes;
9121 stats->rx_missed_errors += prev_stats->rx_missed_errors;
9122 stats->multicast += prev_stats->multicast;
9123 stats->tx_dropped += prev_stats->tx_dropped;
9124}
9125
9126static void
9127bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
9128{
9129 struct bnxt *bp = netdev_priv(dev);
9130
9131 set_bit(BNXT_STATE_READ_STATS, &bp->state);
9132 /* Make sure bnxt_close_nic() sees that we are reading stats before
9133 * we check the BNXT_STATE_OPEN flag.
9134 */
9135 smp_mb__after_atomic();
9136 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9137 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9138 *stats = bp->net_stats_prev;
9139 return;
9140 }
9141
9142 bnxt_get_ring_stats(bp, stats);
9143 bnxt_add_prev_stats(bp, stats);
c0c050c5 9144
9947f83f
MC
9145 if (bp->flags & BNXT_FLAG_PORT_STATS) {
9146 struct rx_port_stats *rx = bp->hw_rx_port_stats;
9147 struct tx_port_stats *tx = bp->hw_tx_port_stats;
9148
9149 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
9150 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
9151 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
9152 le64_to_cpu(rx->rx_ovrsz_frames) +
9153 le64_to_cpu(rx->rx_runt_frames);
9154 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
9155 le64_to_cpu(rx->rx_jbr_frames);
9156 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
9157 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
9158 stats->tx_errors = le64_to_cpu(tx->tx_err);
9159 }
f9b76ebd 9160 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
c0c050c5
MC
9161}
9162
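/* Copy the netdev multicast list into VNIC 0 and report whether it has
 * changed.  Falls back to ALL_MCAST when the list exceeds
 * BNXT_MAX_MC_ADDRS.
 */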
9163static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
9164{
9165 struct net_device *dev = bp->dev;
9166 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9167 struct netdev_hw_addr *ha;
9168 u8 *haddr;
9169 int mc_count = 0;
9170 bool update = false;
9171 int off = 0;
9172
9173 netdev_for_each_mc_addr(ha, dev) {
9174 if (mc_count >= BNXT_MAX_MC_ADDRS) {
9175 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9176 vnic->mc_list_count = 0;
9177 return false;
9178 }
9179 haddr = ha->addr;
9180 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
9181 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
9182 update = true;
9183 }
9184 off += ETH_ALEN;
9185 mc_count++;
9186 }
9187 if (mc_count)
9188 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
9189
9190 if (mc_count != vnic->mc_list_count) {
9191 vnic->mc_list_count = mc_count;
9192 update = true;
9193 }
9194 return update;
9195}
9196
9197static bool bnxt_uc_list_updated(struct bnxt *bp)
9198{
9199 struct net_device *dev = bp->dev;
9200 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9201 struct netdev_hw_addr *ha;
9202 int off = 0;
9203
9204 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
9205 return true;
9206
9207 netdev_for_each_uc_addr(ha, dev) {
9208 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
9209 return true;
9210
9211 off += ETH_ALEN;
9212 }
9213 return false;
9214}
9215
9216static void bnxt_set_rx_mode(struct net_device *dev)
9217{
9218 struct bnxt *bp = netdev_priv(dev);
9219 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9220 u32 mask = vnic->rx_mask;
9221 bool mc_update = false;
9222 bool uc_update;
9223
9224 if (!netif_running(dev))
9225 return;
9226
9227 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
9228 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
30e33848
MC
9229 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
9230 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
c0c050c5 9231
17c71ac3 9232 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
c0c050c5
MC
9233 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9234
9235 uc_update = bnxt_uc_list_updated(bp);
9236
30e33848
MC
9237 if (dev->flags & IFF_BROADCAST)
9238 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5
MC
9239 if (dev->flags & IFF_ALLMULTI) {
9240 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9241 vnic->mc_list_count = 0;
9242 } else {
9243 mc_update = bnxt_mc_list_updated(bp, &mask);
9244 }
9245
9246 if (mask != vnic->rx_mask || uc_update || mc_update) {
9247 vnic->rx_mask = mask;
9248
9249 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
c213eae8 9250 bnxt_queue_sp_work(bp);
c0c050c5
MC
9251 }
9252}
9253
b664f008 9254static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
9255{
9256 struct net_device *dev = bp->dev;
9257 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9258 struct netdev_hw_addr *ha;
9259 int i, off = 0, rc;
9260 bool uc_update;
9261
9262 netif_addr_lock_bh(dev);
9263 uc_update = bnxt_uc_list_updated(bp);
9264 netif_addr_unlock_bh(dev);
9265
9266 if (!uc_update)
9267 goto skip_uc;
9268
9269 mutex_lock(&bp->hwrm_cmd_lock);
9270 for (i = 1; i < vnic->uc_filter_count; i++) {
9271 struct hwrm_cfa_l2_filter_free_input req = {0};
9272
9273 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
9274 -1);
9275
9276 req.l2_filter_id = vnic->fw_l2_filter_id[i];
9277
9278 rc = _hwrm_send_message(bp, &req, sizeof(req),
9279 HWRM_CMD_TIMEOUT);
9280 }
9281 mutex_unlock(&bp->hwrm_cmd_lock);
9282
9283 vnic->uc_filter_count = 1;
9284
9285 netif_addr_lock_bh(dev);
9286 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
9287 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9288 } else {
9289 netdev_for_each_uc_addr(ha, dev) {
9290 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
9291 off += ETH_ALEN;
9292 vnic->uc_filter_count++;
9293 }
9294 }
9295 netif_addr_unlock_bh(dev);
9296
9297 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
9298 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
9299 if (rc) {
9300 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
9301 rc);
9302 vnic->uc_filter_count = i;
b664f008 9303 return rc;
c0c050c5
MC
9304 }
9305 }
9306
9307skip_uc:
9308 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
b4e30e8e
MC
9309 if (rc && vnic->mc_list_count) {
9310 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
9311 rc);
9312 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9313 vnic->mc_list_count = 0;
9314 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9315 }
c0c050c5 9316 if (rc)
b4e30e8e 9317 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
c0c050c5 9318 rc);
b664f008
MC
9319
9320 return rc;
c0c050c5
MC
9321}
9322
2773dfb2
MC
9323static bool bnxt_can_reserve_rings(struct bnxt *bp)
9324{
9325#ifdef CONFIG_BNXT_SRIOV
f1ca94de 9326 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
2773dfb2
MC
9327 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9328
9329 /* No minimum rings were provisioned by the PF. Don't
9330 * reserve rings by default when device is down.
9331 */
9332 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
9333 return true;
9334
9335 if (!netif_running(bp->dev))
9336 return false;
9337 }
9338#endif
9339 return true;
9340}
9341
8079e8f1
MC
 9342/* If the chip and firmware support RFS */
9343static bool bnxt_rfs_supported(struct bnxt *bp)
9344{
e969ae5b
MC
9345 if (bp->flags & BNXT_FLAG_CHIP_P5) {
9346 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
9347 return true;
41e8d798 9348 return false;
e969ae5b 9349 }
8079e8f1
MC
9350 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
9351 return true;
ae10ae74
MC
9352 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9353 return true;
8079e8f1
MC
9354 return false;
9355}
9356
9357/* If runtime conditions support RFS */
2bcfa6f6
MC
9358static bool bnxt_rfs_capable(struct bnxt *bp)
9359{
9360#ifdef CONFIG_RFS_ACCEL
8079e8f1 9361 int vnics, max_vnics, max_rss_ctxs;
2bcfa6f6 9362
41e8d798 9363 if (bp->flags & BNXT_FLAG_CHIP_P5)
ac33906c 9364 return bnxt_rfs_supported(bp);
2773dfb2 9365 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
2bcfa6f6
MC
9366 return false;
9367
9368 vnics = 1 + bp->rx_nr_rings;
8079e8f1
MC
9369 max_vnics = bnxt_get_max_func_vnics(bp);
9370 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
ae10ae74
MC
9371
9372 /* RSS contexts not a limiting factor */
9373 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9374 max_rss_ctxs = max_vnics;
8079e8f1 9375 if (vnics > max_vnics || vnics > max_rss_ctxs) {
6a1eef5b
MC
9376 if (bp->rx_nr_rings > 1)
9377 netdev_warn(bp->dev,
9378 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9379 min(max_rss_ctxs - 1, max_vnics - 1));
2bcfa6f6 9380 return false;
a2304909 9381 }
2bcfa6f6 9382
f1ca94de 9383 if (!BNXT_NEW_RM(bp))
6a1eef5b
MC
9384 return true;
9385
9386 if (vnics == bp->hw_resc.resv_vnics)
9387 return true;
9388
780baad4 9389 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
6a1eef5b
MC
9390 if (vnics <= bp->hw_resc.resv_vnics)
9391 return true;
9392
9393 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
780baad4 9394 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
6a1eef5b 9395 return false;
2bcfa6f6
MC
9396#else
9397 return false;
9398#endif
9399}
9400
c0c050c5
MC
9401static netdev_features_t bnxt_fix_features(struct net_device *dev,
9402 netdev_features_t features)
9403{
2bcfa6f6
MC
9404 struct bnxt *bp = netdev_priv(dev);
9405
a2304909 9406 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
2bcfa6f6 9407 features &= ~NETIF_F_NTUPLE;
5a9f6b23 9408
1054aee8
MC
9409 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9410 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9411
9412 if (!(features & NETIF_F_GRO))
9413 features &= ~NETIF_F_GRO_HW;
9414
9415 if (features & NETIF_F_GRO_HW)
9416 features &= ~NETIF_F_LRO;
9417
5a9f6b23
MC
 9418	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
9419 * turned on or off together.
9420 */
9421 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
9422 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
9423 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
9424 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9425 NETIF_F_HW_VLAN_STAG_RX);
9426 else
9427 features |= NETIF_F_HW_VLAN_CTAG_RX |
9428 NETIF_F_HW_VLAN_STAG_RX;
9429 }
cf6645f8
MC
9430#ifdef CONFIG_BNXT_SRIOV
9431 if (BNXT_VF(bp)) {
9432 if (bp->vf.vlan) {
9433 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9434 NETIF_F_HW_VLAN_STAG_RX);
9435 }
9436 }
9437#endif
c0c050c5
MC
9438 return features;
9439}
9440
9441static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
9442{
9443 struct bnxt *bp = netdev_priv(dev);
9444 u32 flags = bp->flags;
9445 u32 changes;
9446 int rc = 0;
9447 bool re_init = false;
9448 bool update_tpa = false;
9449
9450 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
1054aee8 9451 if (features & NETIF_F_GRO_HW)
c0c050c5 9452 flags |= BNXT_FLAG_GRO;
1054aee8 9453 else if (features & NETIF_F_LRO)
c0c050c5
MC
9454 flags |= BNXT_FLAG_LRO;
9455
bdbd1eb5
MC
9456 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9457 flags &= ~BNXT_FLAG_TPA;
9458
c0c050c5
MC
9459 if (features & NETIF_F_HW_VLAN_CTAG_RX)
9460 flags |= BNXT_FLAG_STRIP_VLAN;
9461
9462 if (features & NETIF_F_NTUPLE)
9463 flags |= BNXT_FLAG_RFS;
9464
9465 changes = flags ^ bp->flags;
9466 if (changes & BNXT_FLAG_TPA) {
9467 update_tpa = true;
9468 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
f45b7b78
MC
9469 (flags & BNXT_FLAG_TPA) == 0 ||
9470 (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
9471 re_init = true;
9472 }
9473
9474 if (changes & ~BNXT_FLAG_TPA)
9475 re_init = true;
9476
9477 if (flags != bp->flags) {
9478 u32 old_flags = bp->flags;
9479
2bcfa6f6 9480 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
f45b7b78 9481 bp->flags = flags;
c0c050c5
MC
9482 if (update_tpa)
9483 bnxt_set_ring_params(bp);
9484 return rc;
9485 }
9486
9487 if (re_init) {
9488 bnxt_close_nic(bp, false, false);
f45b7b78 9489 bp->flags = flags;
c0c050c5
MC
9490 if (update_tpa)
9491 bnxt_set_ring_params(bp);
9492
9493 return bnxt_open_nic(bp, false, false);
9494 }
9495 if (update_tpa) {
f45b7b78 9496 bp->flags = flags;
c0c050c5
MC
9497 rc = bnxt_set_tpa(bp,
9498 (flags & BNXT_FLAG_TPA) ?
9499 true : false);
9500 if (rc)
9501 bp->flags = old_flags;
9502 }
9503 }
9504 return rc;
9505}
9506
ffd77621
MC
9507static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9508 u32 ring_id, u32 *prod, u32 *cons)
9509{
9510 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9511 struct hwrm_dbg_ring_info_get_input req = {0};
9512 int rc;
9513
9514 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9515 req.ring_type = ring_type;
9516 req.fw_ring_id = cpu_to_le32(ring_id);
9517 mutex_lock(&bp->hwrm_cmd_lock);
9518 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9519 if (!rc) {
9520 *prod = le32_to_cpu(resp->producer_index);
9521 *cons = le32_to_cpu(resp->consumer_index);
9522 }
9523 mutex_unlock(&bp->hwrm_cmd_lock);
9524 return rc;
9525}
9526
9f554590
MC
9527static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9528{
b6ab4b01 9529 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9f554590
MC
9530 int i = bnapi->index;
9531
3b2b7d9d
MC
9532 if (!txr)
9533 return;
9534
9f554590
MC
9535 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9536 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9537 txr->tx_cons);
9538}
9539
9540static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9541{
b6ab4b01 9542 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9f554590
MC
9543 int i = bnapi->index;
9544
3b2b7d9d
MC
9545 if (!rxr)
9546 return;
9547
9f554590
MC
9548 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9549 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9550 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9551 rxr->rx_sw_agg_prod);
9552}
9553
9554static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9555{
9556 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9557 int i = bnapi->index;
9558
9559 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9560 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9561}
9562
c0c050c5
MC
9563static void bnxt_dbg_dump_states(struct bnxt *bp)
9564{
9565 int i;
9566 struct bnxt_napi *bnapi;
c0c050c5
MC
9567
9568 for (i = 0; i < bp->cp_nr_rings; i++) {
9569 bnapi = bp->bnapi[i];
c0c050c5 9570 if (netif_msg_drv(bp)) {
9f554590
MC
9571 bnxt_dump_tx_sw_state(bnapi);
9572 bnxt_dump_rx_sw_state(bnapi);
9573 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
9574 }
9575 }
9576}
9577
6988bd92 9578static void bnxt_reset_task(struct bnxt *bp, bool silent)
c0c050c5 9579{
6988bd92
MC
9580 if (!silent)
9581 bnxt_dbg_dump_states(bp);
028de140 9582 if (netif_running(bp->dev)) {
b386cd36
MC
9583 int rc;
9584
9585 if (!silent)
9586 bnxt_ulp_stop(bp);
028de140 9587 bnxt_close_nic(bp, false, false);
b386cd36
MC
9588 rc = bnxt_open_nic(bp, false, false);
9589 if (!silent && !rc)
9590 bnxt_ulp_start(bp);
028de140 9591 }
c0c050c5
MC
9592}
9593
9594static void bnxt_tx_timeout(struct net_device *dev)
9595{
9596 struct bnxt *bp = netdev_priv(dev);
9597
9598 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9599 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 9600 bnxt_queue_sp_work(bp);
c0c050c5
MC
9601}
9602
e99e88a9 9603static void bnxt_timer(struct timer_list *t)
c0c050c5 9604{
e99e88a9 9605 struct bnxt *bp = from_timer(bp, t, timer);
c0c050c5
MC
9606 struct net_device *dev = bp->dev;
9607
9608 if (!netif_running(dev))
9609 return;
9610
9611 if (atomic_read(&bp->intr_sem) != 0)
9612 goto bnxt_restart_timer;
9613
adcc331e
MC
9614 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
9615 bp->stats_coal_ticks) {
3bdf56c4 9616 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
c213eae8 9617 bnxt_queue_sp_work(bp);
3bdf56c4 9618 }
5a84acbe
SP
9619
9620 if (bnxt_tc_flower_enabled(bp)) {
9621 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
9622 bnxt_queue_sp_work(bp);
9623 }
a1ef4a79
MC
9624
9625 if (bp->link_info.phy_retry) {
9626 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
9627 bp->link_info.phy_retry = 0;
9628 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
9629 } else {
9630 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
9631 bnxt_queue_sp_work(bp);
9632 }
9633 }
ffd77621
MC
9634
9635 if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
9636 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
9637 bnxt_queue_sp_work(bp);
9638 }
c0c050c5
MC
9639bnxt_restart_timer:
9640 mod_timer(&bp->timer, jiffies + bp->current_interval);
9641}
9642
a551ee94 9643static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6988bd92 9644{
a551ee94
MC
9645 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
9646 * set. If the device is being closed, bnxt_close() may be holding
6988bd92
MC
9647 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
9648 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
9649 */
9650 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9651 rtnl_lock();
a551ee94
MC
9652}
9653
9654static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
9655{
6988bd92
MC
9656 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9657 rtnl_unlock();
9658}
9659
a551ee94
MC
9660/* Only called from bnxt_sp_task() */
9661static void bnxt_reset(struct bnxt *bp, bool silent)
9662{
9663 bnxt_rtnl_lock_sp(bp);
9664 if (test_bit(BNXT_STATE_OPEN, &bp->state))
9665 bnxt_reset_task(bp, silent);
9666 bnxt_rtnl_unlock_sp(bp);
9667}
9668
ffd77621
MC
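/* On P5 chips, look for completion rings that have work pending but whose
 * raw consumer index has not advanced since the last check.  Read the
 * ring's producer/consumer indices from firmware and count a missed IRQ.
 */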
9669static void bnxt_chk_missed_irq(struct bnxt *bp)
9670{
9671 int i;
9672
9673 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9674 return;
9675
9676 for (i = 0; i < bp->cp_nr_rings; i++) {
9677 struct bnxt_napi *bnapi = bp->bnapi[i];
9678 struct bnxt_cp_ring_info *cpr;
9679 u32 fw_ring_id;
9680 int j;
9681
9682 if (!bnapi)
9683 continue;
9684
9685 cpr = &bnapi->cp_ring;
9686 for (j = 0; j < 2; j++) {
9687 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
9688 u32 val[2];
9689
9690 if (!cpr2 || cpr2->has_more_work ||
9691 !bnxt_has_work(bp, cpr2))
9692 continue;
9693
9694 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
9695 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
9696 continue;
9697 }
9698 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
9699 bnxt_dbg_hwrm_ring_info_get(bp,
9700 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
9701 fw_ring_id, &val[0], &val[1]);
83eb5c5c 9702 cpr->missed_irqs++;
ffd77621
MC
9703 }
9704 }
9705}
9706
c0c050c5
MC
9707static void bnxt_cfg_ntp_filters(struct bnxt *);
9708
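/* Slow-path workqueue handler.  Each bit in bp->sp_event requests one
 * deferred task (rx mode update, tunnel port add/del, port stats, link
 * changes, resets).  BNXT_STATE_IN_SP_TASK is set for the duration so
 * that bnxt_close() can synchronize with this work.
 */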
9709static void bnxt_sp_task(struct work_struct *work)
9710{
9711 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
c0c050c5 9712
4cebdcec
MC
9713 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9714 smp_mb__after_atomic();
9715 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9716 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5 9717 return;
4cebdcec 9718 }
c0c050c5
MC
9719
9720 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
9721 bnxt_cfg_rx_mode(bp);
9722
9723 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
9724 bnxt_cfg_ntp_filters(bp);
c0c050c5
MC
9725 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
9726 bnxt_hwrm_exec_fwd_req(bp);
9727 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9728 bnxt_hwrm_tunnel_dst_port_alloc(
9729 bp, bp->vxlan_port,
9730 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9731 }
9732 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9733 bnxt_hwrm_tunnel_dst_port_free(
9734 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9735 }
7cdd5fc3
AD
9736 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9737 bnxt_hwrm_tunnel_dst_port_alloc(
9738 bp, bp->nge_port,
9739 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9740 }
9741 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9742 bnxt_hwrm_tunnel_dst_port_free(
9743 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9744 }
00db3cba 9745 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
3bdf56c4 9746 bnxt_hwrm_port_qstats(bp);
00db3cba 9747 bnxt_hwrm_port_qstats_ext(bp);
55e4398d 9748 bnxt_hwrm_pcie_qstats(bp);
00db3cba 9749 }
3bdf56c4 9750
0eaa24b9 9751 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
e2dc9b6e 9752 int rc;
0eaa24b9 9753
e2dc9b6e 9754 mutex_lock(&bp->link_lock);
0eaa24b9
MC
9755 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
9756 &bp->sp_event))
9757 bnxt_hwrm_phy_qcaps(bp);
9758
e2dc9b6e
MC
9759 rc = bnxt_update_link(bp, true);
9760 mutex_unlock(&bp->link_lock);
0eaa24b9
MC
9761 if (rc)
9762 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
9763 rc);
9764 }
a1ef4a79
MC
9765 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
9766 int rc;
9767
9768 mutex_lock(&bp->link_lock);
9769 rc = bnxt_update_phy_setting(bp);
9770 mutex_unlock(&bp->link_lock);
9771 if (rc) {
9772 netdev_warn(bp->dev, "update phy settings retry failed\n");
9773 } else {
9774 bp->link_info.phy_retry = false;
9775 netdev_info(bp->dev, "update phy settings retry succeeded\n");
9776 }
9777 }
90c694bb 9778 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
e2dc9b6e
MC
9779 mutex_lock(&bp->link_lock);
9780 bnxt_get_port_module_status(bp);
9781 mutex_unlock(&bp->link_lock);
90c694bb 9782 }
5a84acbe
SP
9783
9784 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
9785 bnxt_tc_flow_stats_work(bp);
9786
ffd77621
MC
9787 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
9788 bnxt_chk_missed_irq(bp);
9789
e2dc9b6e
MC
 9790	/* The functions below will clear BNXT_STATE_IN_SP_TASK. They
9791 * must be the last functions to be called before exiting.
9792 */
6988bd92
MC
9793 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
9794 bnxt_reset(bp, false);
4cebdcec 9795
fc0f1929
MC
9796 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
9797 bnxt_reset(bp, true);
9798
4cebdcec
MC
9799 smp_mb__before_atomic();
9800 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5
MC
9801}
9802
d1e7925e 9803/* Under rtnl_lock */
98fdbe73
MC
9804int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
9805 int tx_xdp)
d1e7925e
MC
9806{
9807 int max_rx, max_tx, tx_sets = 1;
780baad4 9808 int tx_rings_needed, stats;
8f23d638 9809 int rx_rings = rx;
6fc2ffdf 9810 int cp, vnics, rc;
d1e7925e 9811
d1e7925e
MC
9812 if (tcs)
9813 tx_sets = tcs;
9814
9815 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
9816 if (rc)
9817 return rc;
9818
9819 if (max_rx < rx)
9820 return -ENOMEM;
9821
5f449249 9822 tx_rings_needed = tx * tx_sets + tx_xdp;
d1e7925e
MC
9823 if (max_tx < tx_rings_needed)
9824 return -ENOMEM;
9825
6fc2ffdf 9826 vnics = 1;
9b3d15e6 9827 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
6fc2ffdf
EW
9828 vnics += rx_rings;
9829
8f23d638
MC
9830 if (bp->flags & BNXT_FLAG_AGG_RINGS)
9831 rx_rings <<= 1;
9832 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
780baad4
VV
9833 stats = cp;
9834 if (BNXT_NEW_RM(bp)) {
11c3ec7b 9835 cp += bnxt_get_ulp_msix_num(bp);
780baad4
VV
9836 stats += bnxt_get_ulp_stat_ctxs(bp);
9837 }
6fc2ffdf 9838 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
780baad4 9839 stats, vnics);
d1e7925e
MC
9840}
9841
17086399
SP
9842static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
9843{
9844 if (bp->bar2) {
9845 pci_iounmap(pdev, bp->bar2);
9846 bp->bar2 = NULL;
9847 }
9848
9849 if (bp->bar1) {
9850 pci_iounmap(pdev, bp->bar1);
9851 bp->bar1 = NULL;
9852 }
9853
9854 if (bp->bar0) {
9855 pci_iounmap(pdev, bp->bar0);
9856 bp->bar0 = NULL;
9857 }
9858}
9859
9860static void bnxt_cleanup_pci(struct bnxt *bp)
9861{
9862 bnxt_unmap_bars(bp, bp->pdev);
9863 pci_release_regions(bp->pdev);
9864 pci_disable_device(bp->pdev);
9865}
9866
18775aa8
MC
9867static void bnxt_init_dflt_coal(struct bnxt *bp)
9868{
9869 struct bnxt_coal *coal;
9870
 9871	/* Tick values in microseconds.
9872 * 1 coal_buf x bufs_per_record = 1 completion record.
9873 */
9874 coal = &bp->rx_coal;
0c2ff8d7 9875 coal->coal_ticks = 10;
18775aa8
MC
9876 coal->coal_bufs = 30;
9877 coal->coal_ticks_irq = 1;
9878 coal->coal_bufs_irq = 2;
05abe4dd 9879 coal->idle_thresh = 50;
18775aa8
MC
9880 coal->bufs_per_record = 2;
9881 coal->budget = 64; /* NAPI budget */
9882
9883 coal = &bp->tx_coal;
9884 coal->coal_ticks = 28;
9885 coal->coal_bufs = 30;
9886 coal->coal_ticks_irq = 2;
9887 coal->coal_bufs_irq = 2;
9888 coal->bufs_per_record = 1;
9889
9890 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
9891}
9892
c0c050c5
MC
9893static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
9894{
9895 int rc;
9896 struct bnxt *bp = netdev_priv(dev);
9897
9898 SET_NETDEV_DEV(dev, &pdev->dev);
9899
9900 /* enable device (incl. PCI PM wakeup), and bus-mastering */
9901 rc = pci_enable_device(pdev);
9902 if (rc) {
9903 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9904 goto init_err;
9905 }
9906
9907 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9908 dev_err(&pdev->dev,
9909 "Cannot find PCI device base address, aborting\n");
9910 rc = -ENODEV;
9911 goto init_err_disable;
9912 }
9913
9914 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9915 if (rc) {
9916 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9917 goto init_err_disable;
9918 }
9919
9920 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
9921 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9922 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
9923 goto init_err_disable;
9924 }
9925
9926 pci_set_master(pdev);
9927
9928 bp->dev = dev;
9929 bp->pdev = pdev;
9930
9931 bp->bar0 = pci_ioremap_bar(pdev, 0);
9932 if (!bp->bar0) {
9933 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9934 rc = -ENOMEM;
9935 goto init_err_release;
9936 }
9937
9938 bp->bar1 = pci_ioremap_bar(pdev, 2);
9939 if (!bp->bar1) {
9940 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
9941 rc = -ENOMEM;
9942 goto init_err_release;
9943 }
9944
9945 bp->bar2 = pci_ioremap_bar(pdev, 4);
9946 if (!bp->bar2) {
9947 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
9948 rc = -ENOMEM;
9949 goto init_err_release;
9950 }
9951
6316ea6d
SB
9952 pci_enable_pcie_error_reporting(pdev);
9953
c0c050c5
MC
9954 INIT_WORK(&bp->sp_task, bnxt_sp_task);
9955
9956 spin_lock_init(&bp->ntp_fltr_lock);
697197e5
MC
9957#if BITS_PER_LONG == 32
9958 spin_lock_init(&bp->db_lock);
9959#endif
c0c050c5
MC
9960
9961 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
9962 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
9963
18775aa8 9964 bnxt_init_dflt_coal(bp);
51f30785 9965
e99e88a9 9966 timer_setup(&bp->timer, bnxt_timer, 0);
c0c050c5
MC
9967 bp->current_interval = BNXT_TIMER_INTERVAL;
9968
caefe526 9969 clear_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
9970 return 0;
9971
9972init_err_release:
17086399 9973 bnxt_unmap_bars(bp, pdev);
c0c050c5
MC
9974 pci_release_regions(pdev);
9975
9976init_err_disable:
9977 pci_disable_device(pdev);
9978
9979init_err:
9980 return rc;
9981}
9982
9983/* rtnl_lock held */
9984static int bnxt_change_mac_addr(struct net_device *dev, void *p)
9985{
9986 struct sockaddr *addr = p;
1fc2cfd0
JH
9987 struct bnxt *bp = netdev_priv(dev);
9988 int rc = 0;
c0c050c5
MC
9989
9990 if (!is_valid_ether_addr(addr->sa_data))
9991 return -EADDRNOTAVAIL;
9992
c1a7bdff
MC
9993 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
9994 return 0;
9995
28ea334b 9996 rc = bnxt_approve_mac(bp, addr->sa_data, true);
84c33dd3
MC
9997 if (rc)
9998 return rc;
bdd4347b 9999
c0c050c5 10000 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1fc2cfd0
JH
10001 if (netif_running(dev)) {
10002 bnxt_close_nic(bp, false, false);
10003 rc = bnxt_open_nic(bp, false, false);
10004 }
c0c050c5 10005
1fc2cfd0 10006 return rc;
c0c050c5
MC
10007}
10008
10009/* rtnl_lock held */
10010static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
10011{
10012 struct bnxt *bp = netdev_priv(dev);
10013
c0c050c5
MC
10014 if (netif_running(dev))
10015 bnxt_close_nic(bp, false, false);
10016
10017 dev->mtu = new_mtu;
10018 bnxt_set_ring_params(bp);
10019
10020 if (netif_running(dev))
10021 return bnxt_open_nic(bp, false, false);
10022
10023 return 0;
10024}
10025
c5e3deb8 10026int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
c0c050c5
MC
10027{
10028 struct bnxt *bp = netdev_priv(dev);
3ffb6a39 10029 bool sh = false;
d1e7925e 10030 int rc;
16e5cc64 10031
c0c050c5 10032 if (tc > bp->max_tc) {
b451c8b6 10033 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
c0c050c5
MC
10034 tc, bp->max_tc);
10035 return -EINVAL;
10036 }
10037
10038 if (netdev_get_num_tc(dev) == tc)
10039 return 0;
10040
3ffb6a39
MC
10041 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10042 sh = true;
10043
98fdbe73
MC
10044 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
10045 sh, tc, bp->tx_nr_rings_xdp);
d1e7925e
MC
10046 if (rc)
10047 return rc;
c0c050c5
MC
10048
10049 /* Needs to close the device and do hw resource re-allocations */
10050 if (netif_running(bp->dev))
10051 bnxt_close_nic(bp, true, false);
10052
10053 if (tc) {
10054 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
10055 netdev_set_num_tc(dev, tc);
10056 } else {
10057 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10058 netdev_reset_tc(dev);
10059 }
87e9b377 10060 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
3ffb6a39
MC
10061 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
10062 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5
MC
10063
10064 if (netif_running(bp->dev))
10065 return bnxt_open_nic(bp, true, false);
10066
10067 return 0;
10068}
10069
9e0fd15d
JP
10070static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
10071 void *cb_priv)
c5e3deb8 10072{
9e0fd15d 10073 struct bnxt *bp = cb_priv;
de4784ca 10074
312324f1
JK
10075 if (!bnxt_tc_flower_enabled(bp) ||
10076 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
38cf0426 10077 return -EOPNOTSUPP;
c5e3deb8 10078
9e0fd15d
JP
10079 switch (type) {
10080 case TC_SETUP_CLSFLOWER:
10081 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
10082 default:
10083 return -EOPNOTSUPP;
10084 }
10085}
10086
955bcb6e
PNA
10087static LIST_HEAD(bnxt_block_cb_list);
10088
2ae7408f
SP
10089static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
10090 void *type_data)
10091{
4e95bc26
PNA
10092 struct bnxt *bp = netdev_priv(dev);
10093
2ae7408f 10094 switch (type) {
9e0fd15d 10095 case TC_SETUP_BLOCK:
955bcb6e
PNA
10096 return flow_block_cb_setup_simple(type_data,
10097 &bnxt_block_cb_list,
4e95bc26
PNA
10098 bnxt_setup_tc_block_cb,
10099 bp, bp, true);
575ed7d3 10100 case TC_SETUP_QDISC_MQPRIO: {
2ae7408f
SP
10101 struct tc_mqprio_qopt *mqprio = type_data;
10102
10103 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
56f36acd 10104
2ae7408f
SP
10105 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
10106 }
10107 default:
10108 return -EOPNOTSUPP;
10109 }
c5e3deb8
MC
10110}
10111
c0c050c5
MC
10112#ifdef CONFIG_RFS_ACCEL
10113static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
10114 struct bnxt_ntuple_filter *f2)
10115{
10116 struct flow_keys *keys1 = &f1->fkeys;
10117 struct flow_keys *keys2 = &f2->fkeys;
10118
10119 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
10120 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
10121 keys1->ports.ports == keys2->ports.ports &&
10122 keys1->basic.ip_proto == keys2->basic.ip_proto &&
10123 keys1->basic.n_proto == keys2->basic.n_proto &&
61aad724 10124 keys1->control.flags == keys2->control.flags &&
a54c4d74
MC
10125 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
10126 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
c0c050c5
MC
10127 return true;
10128
10129 return false;
10130}
10131
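/* .ndo_rx_flow_steer handler for aRFS: build an ntuple filter from the
 * skb's flow keys, drop it if an identical filter already exists, then
 * schedule slow-path work to program the new filter in hardware.
 */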
10132static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
10133 u16 rxq_index, u32 flow_id)
10134{
10135 struct bnxt *bp = netdev_priv(dev);
10136 struct bnxt_ntuple_filter *fltr, *new_fltr;
10137 struct flow_keys *fkeys;
10138 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
a54c4d74 10139 int rc = 0, idx, bit_id, l2_idx = 0;
c0c050c5
MC
10140 struct hlist_head *head;
10141
a54c4d74
MC
10142 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
10143 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10144 int off = 0, j;
10145
10146 netif_addr_lock_bh(dev);
10147 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
10148 if (ether_addr_equal(eth->h_dest,
10149 vnic->uc_list + off)) {
10150 l2_idx = j + 1;
10151 break;
10152 }
10153 }
10154 netif_addr_unlock_bh(dev);
10155 if (!l2_idx)
10156 return -EINVAL;
10157 }
c0c050c5
MC
10158 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
10159 if (!new_fltr)
10160 return -ENOMEM;
10161
10162 fkeys = &new_fltr->fkeys;
10163 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
10164 rc = -EPROTONOSUPPORT;
10165 goto err_free;
10166 }
10167
dda0e746
MC
10168 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
10169 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
c0c050c5
MC
10170 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
10171 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
10172 rc = -EPROTONOSUPPORT;
10173 goto err_free;
10174 }
dda0e746
MC
10175 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
10176 bp->hwrm_spec_code < 0x10601) {
10177 rc = -EPROTONOSUPPORT;
10178 goto err_free;
10179 }
61aad724
MC
10180 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
10181 bp->hwrm_spec_code < 0x10601) {
10182 rc = -EPROTONOSUPPORT;
10183 goto err_free;
10184 }
c0c050c5 10185
a54c4d74 10186 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
c0c050c5
MC
10187 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
10188
10189 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
10190 head = &bp->ntp_fltr_hash_tbl[idx];
10191 rcu_read_lock();
10192 hlist_for_each_entry_rcu(fltr, head, hash) {
10193 if (bnxt_fltr_match(fltr, new_fltr)) {
10194 rcu_read_unlock();
10195 rc = 0;
10196 goto err_free;
10197 }
10198 }
10199 rcu_read_unlock();
10200
10201 spin_lock_bh(&bp->ntp_fltr_lock);
84e86b98
MC
10202 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
10203 BNXT_NTP_FLTR_MAX_FLTR, 0);
10204 if (bit_id < 0) {
c0c050c5
MC
10205 spin_unlock_bh(&bp->ntp_fltr_lock);
10206 rc = -ENOMEM;
10207 goto err_free;
10208 }
10209
84e86b98 10210 new_fltr->sw_id = (u16)bit_id;
c0c050c5 10211 new_fltr->flow_id = flow_id;
a54c4d74 10212 new_fltr->l2_fltr_idx = l2_idx;
c0c050c5
MC
10213 new_fltr->rxq = rxq_index;
10214 hlist_add_head_rcu(&new_fltr->hash, head);
10215 bp->ntp_fltr_count++;
10216 spin_unlock_bh(&bp->ntp_fltr_lock);
10217
10218 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
c213eae8 10219 bnxt_queue_sp_work(bp);
c0c050c5
MC
10220
10221 return new_fltr->sw_id;
10222
10223err_free:
10224 kfree(new_fltr);
10225 return rc;
10226}
10227
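/* Program newly added ntuple filters and free the ones whose flows have
 * expired according to rps_may_expire_flow().
 */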
10228static void bnxt_cfg_ntp_filters(struct bnxt *bp)
10229{
10230 int i;
10231
10232 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
10233 struct hlist_head *head;
10234 struct hlist_node *tmp;
10235 struct bnxt_ntuple_filter *fltr;
10236 int rc;
10237
10238 head = &bp->ntp_fltr_hash_tbl[i];
10239 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
10240 bool del = false;
10241
10242 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
10243 if (rps_may_expire_flow(bp->dev, fltr->rxq,
10244 fltr->flow_id,
10245 fltr->sw_id)) {
10246 bnxt_hwrm_cfa_ntuple_filter_free(bp,
10247 fltr);
10248 del = true;
10249 }
10250 } else {
10251 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
10252 fltr);
10253 if (rc)
10254 del = true;
10255 else
10256 set_bit(BNXT_FLTR_VALID, &fltr->state);
10257 }
10258
10259 if (del) {
10260 spin_lock_bh(&bp->ntp_fltr_lock);
10261 hlist_del_rcu(&fltr->hash);
10262 bp->ntp_fltr_count--;
10263 spin_unlock_bh(&bp->ntp_fltr_lock);
10264 synchronize_rcu();
10265 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
10266 kfree(fltr);
10267 }
10268 }
10269 }
19241368
JH
10270 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
 10271		netdev_info(bp->dev, "Receive PF driver unload event!\n");
c0c050c5
MC
10272}
10273
10274#else
10275
10276static void bnxt_cfg_ntp_filters(struct bnxt *bp)
10277{
10278}
10279
10280#endif /* CONFIG_RFS_ACCEL */
10281
ad51b8e9
AD
10282static void bnxt_udp_tunnel_add(struct net_device *dev,
10283 struct udp_tunnel_info *ti)
c0c050c5
MC
10284{
10285 struct bnxt *bp = netdev_priv(dev);
10286
ad51b8e9 10287 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
c0c050c5
MC
10288 return;
10289
ad51b8e9 10290 if (!netif_running(dev))
c0c050c5
MC
10291 return;
10292
ad51b8e9
AD
10293 switch (ti->type) {
10294 case UDP_TUNNEL_TYPE_VXLAN:
10295 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
10296 return;
c0c050c5 10297
ad51b8e9
AD
10298 bp->vxlan_port_cnt++;
10299 if (bp->vxlan_port_cnt == 1) {
10300 bp->vxlan_port = ti->port;
10301 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
c213eae8 10302 bnxt_queue_sp_work(bp);
ad51b8e9
AD
10303 }
10304 break;
7cdd5fc3
AD
10305 case UDP_TUNNEL_TYPE_GENEVE:
10306 if (bp->nge_port_cnt && bp->nge_port != ti->port)
10307 return;
10308
10309 bp->nge_port_cnt++;
10310 if (bp->nge_port_cnt == 1) {
10311 bp->nge_port = ti->port;
10312 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
10313 }
10314 break;
ad51b8e9
AD
10315 default:
10316 return;
c0c050c5 10317 }
ad51b8e9 10318
c213eae8 10319 bnxt_queue_sp_work(bp);
c0c050c5
MC
10320}
10321
ad51b8e9
AD
10322static void bnxt_udp_tunnel_del(struct net_device *dev,
10323 struct udp_tunnel_info *ti)
c0c050c5
MC
10324{
10325 struct bnxt *bp = netdev_priv(dev);
10326
ad51b8e9 10327 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
c0c050c5
MC
10328 return;
10329
ad51b8e9 10330 if (!netif_running(dev))
c0c050c5
MC
10331 return;
10332
ad51b8e9
AD
10333 switch (ti->type) {
10334 case UDP_TUNNEL_TYPE_VXLAN:
10335 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
10336 return;
c0c050c5
MC
10337 bp->vxlan_port_cnt--;
10338
ad51b8e9
AD
10339 if (bp->vxlan_port_cnt != 0)
10340 return;
10341
10342 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
10343 break;
7cdd5fc3
AD
10344 case UDP_TUNNEL_TYPE_GENEVE:
10345 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
10346 return;
10347 bp->nge_port_cnt--;
10348
10349 if (bp->nge_port_cnt != 0)
10350 return;
10351
10352 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
10353 break;
ad51b8e9
AD
10354 default:
10355 return;
c0c050c5 10356 }
ad51b8e9 10357
c213eae8 10358 bnxt_queue_sp_work(bp);
c0c050c5
MC
10359}
10360
39d8ba2e
MC
10361static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
10362 struct net_device *dev, u32 filter_mask,
10363 int nlflags)
10364{
10365 struct bnxt *bp = netdev_priv(dev);
10366
10367 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
10368 nlflags, filter_mask, NULL);
10369}
10370
10371static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
2fd527b7 10372 u16 flags, struct netlink_ext_ack *extack)
39d8ba2e
MC
10373{
10374 struct bnxt *bp = netdev_priv(dev);
10375 struct nlattr *attr, *br_spec;
10376 int rem, rc = 0;
10377
10378 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
10379 return -EOPNOTSUPP;
10380
10381 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
10382 if (!br_spec)
10383 return -EINVAL;
10384
10385 nla_for_each_nested(attr, br_spec, rem) {
10386 u16 mode;
10387
10388 if (nla_type(attr) != IFLA_BRIDGE_MODE)
10389 continue;
10390
10391 if (nla_len(attr) < sizeof(mode))
10392 return -EINVAL;
10393
10394 mode = nla_get_u16(attr);
10395 if (mode == bp->br_mode)
10396 break;
10397
10398 rc = bnxt_hwrm_set_br_mode(bp, mode);
10399 if (!rc)
10400 bp->br_mode = mode;
10401 break;
10402 }
10403 return rc;
10404}
10405
52d5254a
FF
10406int bnxt_get_port_parent_id(struct net_device *dev,
10407 struct netdev_phys_item_id *ppid)
c124a62f 10408{
52d5254a
FF
10409 struct bnxt *bp = netdev_priv(dev);
10410
c124a62f
SP
10411 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
10412 return -EOPNOTSUPP;
10413
 10414	/* The PF and its VF-reps only support the switchdev framework */
10415 if (!BNXT_PF(bp))
10416 return -EOPNOTSUPP;
10417
52d5254a
FF
10418 ppid->id_len = sizeof(bp->switch_id);
10419 memcpy(ppid->id, bp->switch_id, ppid->id_len);
c124a62f 10420
52d5254a 10421 return 0;
c124a62f
SP
10422}
10423
c9c49a65
JP
10424static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
10425{
10426 struct bnxt *bp = netdev_priv(dev);
10427
10428 return &bp->dl_port;
10429}
10430
c0c050c5
MC
10431static const struct net_device_ops bnxt_netdev_ops = {
10432 .ndo_open = bnxt_open,
10433 .ndo_start_xmit = bnxt_start_xmit,
10434 .ndo_stop = bnxt_close,
10435 .ndo_get_stats64 = bnxt_get_stats64,
10436 .ndo_set_rx_mode = bnxt_set_rx_mode,
10437 .ndo_do_ioctl = bnxt_ioctl,
10438 .ndo_validate_addr = eth_validate_addr,
10439 .ndo_set_mac_address = bnxt_change_mac_addr,
10440 .ndo_change_mtu = bnxt_change_mtu,
10441 .ndo_fix_features = bnxt_fix_features,
10442 .ndo_set_features = bnxt_set_features,
10443 .ndo_tx_timeout = bnxt_tx_timeout,
10444#ifdef CONFIG_BNXT_SRIOV
10445 .ndo_get_vf_config = bnxt_get_vf_config,
10446 .ndo_set_vf_mac = bnxt_set_vf_mac,
10447 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
10448 .ndo_set_vf_rate = bnxt_set_vf_bw,
10449 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
10450 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
746df139 10451 .ndo_set_vf_trust = bnxt_set_vf_trust,
c0c050c5
MC
10452#endif
10453 .ndo_setup_tc = bnxt_setup_tc,
10454#ifdef CONFIG_RFS_ACCEL
10455 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
10456#endif
ad51b8e9
AD
10457 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
10458 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
f4e63525 10459 .ndo_bpf = bnxt_xdp,
f18c2b77 10460 .ndo_xdp_xmit = bnxt_xdp_xmit,
39d8ba2e
MC
10461 .ndo_bridge_getlink = bnxt_bridge_getlink,
10462 .ndo_bridge_setlink = bnxt_bridge_setlink,
c9c49a65 10463 .ndo_get_devlink_port = bnxt_get_devlink_port,
c0c050c5
MC
10464};
10465
10466static void bnxt_remove_one(struct pci_dev *pdev)
10467{
10468 struct net_device *dev = pci_get_drvdata(pdev);
10469 struct bnxt *bp = netdev_priv(dev);
10470
4ab0c6a8 10471 if (BNXT_PF(bp)) {
c0c050c5 10472 bnxt_sriov_disable(bp);
4ab0c6a8
SP
10473 bnxt_dl_unregister(bp);
10474 }
c0c050c5 10475
6316ea6d 10476 pci_disable_pcie_error_reporting(pdev);
c0c050c5 10477 unregister_netdev(dev);
2ae7408f 10478 bnxt_shutdown_tc(bp);
c213eae8 10479 bnxt_cancel_sp_work(bp);
c0c050c5
MC
10480 bp->sp_event = 0;
10481
7809592d 10482 bnxt_clear_int_mode(bp);
be58a0da 10483 bnxt_hwrm_func_drv_unrgtr(bp);
c0c050c5 10484 bnxt_free_hwrm_resources(bp);
e605db80 10485 bnxt_free_hwrm_short_cmd_req(bp);
eb513658 10486 bnxt_ethtool_free(bp);
7df4ae9f 10487 bnxt_dcb_free(bp);
a588e458
MC
10488 kfree(bp->edev);
10489 bp->edev = NULL;
c20dc142 10490 bnxt_cleanup_pci(bp);
98f04cf0
MC
10491 bnxt_free_ctx_mem(bp);
10492 kfree(bp->ctx);
10493 bp->ctx = NULL;
fd3ab1c7 10494 bnxt_free_port_stats(bp);
c0c050c5 10495 free_netdev(dev);
c0c050c5
MC
10496}
10497
10498static int bnxt_probe_phy(struct bnxt *bp)
10499{
10500 int rc = 0;
10501 struct bnxt_link_info *link_info = &bp->link_info;
c0c050c5 10502
170ce013
MC
10503 rc = bnxt_hwrm_phy_qcaps(bp);
10504 if (rc) {
10505 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
10506 rc);
10507 return rc;
10508 }
e2dc9b6e 10509 mutex_init(&bp->link_lock);
170ce013 10510
c0c050c5
MC
10511 rc = bnxt_update_link(bp, false);
10512 if (rc) {
10513 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
10514 rc);
10515 return rc;
10516 }
10517
93ed8117
MC
10518 /* Older firmware does not have supported_auto_speeds, so assume
10519 * that all supported speeds can be autonegotiated.
10520 */
10521 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
10522 link_info->support_auto_speeds = link_info->support_speeds;
10523
c0c050c5 10524 /* Initialize the ethtool settings copy with the NVM settings */
0d8abf02 10525 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
c9ee9516
MC
10526 link_info->autoneg = BNXT_AUTONEG_SPEED;
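/* HWRM spec 1.2.1 (0x10201) and newer report pause autonegotiation as a
 * separate capability bit; on older firmware the driver simply assumes
 * flow control is autonegotiated whenever speed autoneg is enabled.
 */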
10527 if (bp->hwrm_spec_code >= 0x10201) {
10528 if (link_info->auto_pause_setting &
10529 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10530 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10531 } else {
10532 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10533 }
0d8abf02 10534 link_info->advertising = link_info->auto_link_speeds;
0d8abf02
MC
10535 } else {
10536 link_info->req_link_speed = link_info->force_link_speed;
10537 link_info->req_duplex = link_info->duplex_setting;
c0c050c5 10538 }
c9ee9516
MC
10539 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10540 link_info->req_flow_ctrl =
10541 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10542 else
10543 link_info->req_flow_ctrl = link_info->force_pause_setting;
c0c050c5
MC
10544 return rc;
10545}
10546
10547static int bnxt_get_max_irq(struct pci_dev *pdev)
10548{
10549 u16 ctrl;
10550
10551 if (!pdev->msix_cap)
10552 return 1;
10553
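/* The MSI-X Message Control register encodes the table size minus one,
 * so add 1 to get the number of usable vectors.
 */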
10554 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
10555 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
10556}
10557
6e6c5a57
MC
10558static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10559 int *max_cp)
c0c050c5 10560{
6a4f2947 10561 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
e30fbc33 10562 int max_ring_grps = 0, max_irq;
c0c050c5 10563
6a4f2947
MC
10564 *max_tx = hw_resc->max_tx_rings;
10565 *max_rx = hw_resc->max_rx_rings;
e30fbc33
MC
10566 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
10567 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
10568 bnxt_get_ulp_msix_num(bp),
c027c6b4 10569 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
e30fbc33
MC
10570 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10571 *max_cp = min_t(int, *max_cp, max_irq);
6a4f2947 10572 max_ring_grps = hw_resc->max_hw_ring_grps;
76595193
PS
10573 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
10574 *max_cp -= 1;
10575 *max_rx -= 2;
10576 }
c0c050c5
MC
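/* With aggregation (jumbo/TPA) rings enabled, each RX ring needs a
 * companion aggregation ring, so only half of the RX rings are usable.
 */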
10577 if (bp->flags & BNXT_FLAG_AGG_RINGS)
10578 *max_rx >>= 1;
e30fbc33
MC
10579 if (bp->flags & BNXT_FLAG_CHIP_P5) {
10580 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
10581 /* On P5 chips, the max_cp output parameter should be the number of available NQs */
10582 *max_cp = max_irq;
10583 }
b72d4a68 10584 *max_rx = min_t(int, *max_rx, max_ring_grps);
6e6c5a57
MC
10585}
10586
10587int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
10588{
10589 int rx, tx, cp;
10590
10591 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
78f058a4
MC
10592 *max_rx = rx;
10593 *max_tx = tx;
6e6c5a57
MC
10594 if (!rx || !tx || !cp)
10595 return -ENOMEM;
10596
6e6c5a57
MC
10597 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
10598}
10599
e4060d30
MC
10600static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10601 bool shared)
10602{
10603 int rc;
10604
10605 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
bdbd1eb5
MC
10606 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
10607 /* Not enough rings, try disabling agg rings. */
10608 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
10609 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
07f4fde5
MC
10610 if (rc) {
10611 /* set BNXT_FLAG_AGG_RINGS back for consistency */
10612 bp->flags |= BNXT_FLAG_AGG_RINGS;
bdbd1eb5 10613 return rc;
07f4fde5 10614 }
bdbd1eb5 10615 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
1054aee8
MC
10616 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10617 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bdbd1eb5
MC
10618 bnxt_set_ring_params(bp);
10619 }
e4060d30
MC
10620
10621 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
10622 int max_cp, max_stat, max_irq;
10623
10624 /* Reserve minimum resources for RoCE */
10625 max_cp = bnxt_get_max_func_cp_rings(bp);
10626 max_stat = bnxt_get_max_func_stat_ctxs(bp);
10627 max_irq = bnxt_get_max_func_irqs(bp);
10628 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
10629 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
10630 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
10631 return 0;
10632
10633 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
10634 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
10635 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
10636 max_cp = min_t(int, max_cp, max_irq);
10637 max_cp = min_t(int, max_cp, max_stat);
10638 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
10639 if (rc)
10640 rc = 0;
10641 }
10642 return rc;
10643}
10644
58ea801a
MC
10645 /* In the initial default shared ring setting, each shared ring must have an
10646 * RX/TX ring pair.
10647 */
10648static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
10649{
10650 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
10651 bp->rx_nr_rings = bp->cp_nr_rings;
10652 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
10653 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10654}
10655
702c221c 10656static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
6e6c5a57
MC
10657{
10658 int dflt_rings, max_rx_rings, max_tx_rings, rc;
6e6c5a57 10659
2773dfb2
MC
10660 if (!bnxt_can_reserve_rings(bp))
10661 return 0;
10662
6e6c5a57
MC
10663 if (sh)
10664 bp->flags |= BNXT_FLAG_SHARED_RINGS;
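/* A kdump kernel runs with minimal memory, so default to a single ring
 * instead of the usual per-CPU RSS queue count.
 */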
d629522e 10665 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
1d3ef13d
MC
10666 /* Reduce default rings on multi-port cards so that total default
10667 * rings do not exceed CPU count.
10668 */
10669 if (bp->port_count > 1) {
10670 int max_rings =
10671 max_t(int, num_online_cpus() / bp->port_count, 1);
10672
10673 dflt_rings = min_t(int, dflt_rings, max_rings);
10674 }
e4060d30 10675 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6e6c5a57
MC
10676 if (rc)
10677 return rc;
10678 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
10679 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
58ea801a
MC
10680 if (sh)
10681 bnxt_trim_dflt_sh_rings(bp);
10682 else
10683 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
10684 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
391be5c2 10685
674f50a5 10686 rc = __bnxt_reserve_rings(bp);
391be5c2
MC
10687 if (rc)
10688 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
58ea801a
MC
10689 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10690 if (sh)
10691 bnxt_trim_dflt_sh_rings(bp);
391be5c2 10692
674f50a5
MC
10693 /* Rings may have been trimmed, re-reserve the trimmed rings. */
10694 if (bnxt_need_reserve_rings(bp)) {
10695 rc = __bnxt_reserve_rings(bp);
10696 if (rc)
10697 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
10698 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10699 }
76595193
PS
10700 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10701 bp->rx_nr_rings++;
10702 bp->cp_nr_rings++;
10703 }
6e6c5a57 10704 return rc;
c0c050c5
MC
10705}
10706
47558acd
MC
10707static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
10708{
10709 int rc;
10710
10711 if (bp->tx_nr_rings)
10712 return 0;
10713
6b95c3e9
MC
10714 bnxt_ulp_irq_stop(bp);
10715 bnxt_clear_int_mode(bp);
47558acd
MC
10716 rc = bnxt_set_dflt_rings(bp, true);
10717 if (rc) {
10718 netdev_err(bp->dev, "Not enough rings available.\n");
6b95c3e9 10719 goto init_dflt_ring_err;
47558acd
MC
10720 }
10721 rc = bnxt_init_int_mode(bp);
10722 if (rc)
6b95c3e9
MC
10723 goto init_dflt_ring_err;
10724
47558acd
MC
10725 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10726 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
10727 bp->flags |= BNXT_FLAG_RFS;
10728 bp->dev->features |= NETIF_F_NTUPLE;
10729 }
6b95c3e9
MC
10730init_dflt_ring_err:
10731 bnxt_ulp_irq_restart(bp, rc);
10732 return rc;
47558acd
MC
10733}
10734
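/* Re-query function resources from firmware and re-initialize the interrupt
 * mode; if the device is running, it is closed and reopened around the IRQ
 * re-initialization.
 */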
80fcaf46 10735int bnxt_restore_pf_fw_resources(struct bnxt *bp)
7b08f661 10736{
80fcaf46
MC
10737 int rc;
10738
7b08f661
MC
10739 ASSERT_RTNL();
10740 bnxt_hwrm_func_qcaps(bp);
1a037782
VD
10741
10742 if (netif_running(bp->dev))
10743 __bnxt_close_nic(bp, true, false);
10744
ec86f14e 10745 bnxt_ulp_irq_stop(bp);
80fcaf46
MC
10746 bnxt_clear_int_mode(bp);
10747 rc = bnxt_init_int_mode(bp);
ec86f14e 10748 bnxt_ulp_irq_restart(bp, rc);
1a037782
VD
10749
10750 if (netif_running(bp->dev)) {
10751 if (rc)
10752 dev_close(bp->dev);
10753 else
10754 rc = bnxt_open_nic(bp, true, false);
10755 }
10756
80fcaf46 10757 return rc;
7b08f661
MC
10758}
10759
a22a6ac2
MC
10760static int bnxt_init_mac_addr(struct bnxt *bp)
10761{
10762 int rc = 0;
10763
10764 if (BNXT_PF(bp)) {
10765 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
10766 } else {
10767#ifdef CONFIG_BNXT_SRIOV
10768 struct bnxt_vf_info *vf = &bp->vf;
28ea334b 10769 bool strict_approval = true;
a22a6ac2
MC
10770
10771 if (is_valid_ether_addr(vf->mac_addr)) {
91cdda40 10772 /* overwrite netdev dev_addr with admin VF MAC */
a22a6ac2 10773 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
28ea334b
MC
10774 /* Older PF driver or firmware may not approve this
10775 * correctly.
10776 */
10777 strict_approval = false;
a22a6ac2
MC
10778 } else {
10779 eth_hw_addr_random(bp->dev);
a22a6ac2 10780 }
28ea334b 10781 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
a22a6ac2
MC
10782#endif
10783 }
10784 return rc;
10785}
10786
03213a99
JP
10787static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
10788{
10789 struct pci_dev *pdev = bp->pdev;
10790 int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
10791 u32 dw;
10792
10793 if (!pos) {
10794 netdev_info(bp->dev, "Unable do read adapter's DSN");
10795 return -EOPNOTSUPP;
10796 }
10797
10798 /* The DSN (two dwords) is at an offset of 4 from the capability position */
10799 pos += 4;
10800 pci_read_config_dword(pdev, pos, &dw);
10801 put_unaligned_le32(dw, &dsn[0]);
10802 pci_read_config_dword(pdev, pos + 4, &dw);
10803 put_unaligned_le32(dw, &dsn[4]);
10804 return 0;
10805}
10806
c0c050c5
MC
10807static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10808{
10809 static int version_printed;
10810 struct net_device *dev;
10811 struct bnxt *bp;
6e6c5a57 10812 int rc, max_irqs;
c0c050c5 10813
4e00338a 10814 if (pci_is_bridge(pdev))
fa853dda
PS
10815 return -ENODEV;
10816
c0c050c5
MC
10817 if (version_printed++ == 0)
10818 pr_info("%s", version);
10819
10820 max_irqs = bnxt_get_max_irq(pdev);
10821 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
10822 if (!dev)
10823 return -ENOMEM;
10824
10825 bp = netdev_priv(dev);
9c1fabdf 10826 bnxt_set_max_func_irqs(bp, max_irqs);
c0c050c5
MC
10827
10828 if (bnxt_vf_pciid(ent->driver_data))
10829 bp->flags |= BNXT_FLAG_VF;
10830
2bcfa6f6 10831 if (pdev->msix_cap)
c0c050c5 10832 bp->flags |= BNXT_FLAG_MSIX_CAP;
c0c050c5
MC
10833
10834 rc = bnxt_init_board(pdev, dev);
10835 if (rc < 0)
10836 goto init_err_free;
10837
10838 dev->netdev_ops = &bnxt_netdev_ops;
10839 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
10840 dev->ethtool_ops = &bnxt_ethtool_ops;
c0c050c5
MC
10841 pci_set_drvdata(pdev, dev);
10842
3e8060fa
PS
10843 rc = bnxt_alloc_hwrm_resources(bp);
10844 if (rc)
17086399 10845 goto init_err_pci_clean;
3e8060fa
PS
10846
10847 mutex_init(&bp->hwrm_cmd_lock);
10848 rc = bnxt_hwrm_ver_get(bp);
10849 if (rc)
17086399 10850 goto init_err_pci_clean;
3e8060fa 10851
760b6d33
VD
10852 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10853 rc = bnxt_alloc_kong_hwrm_resources(bp);
10854 if (rc)
10855 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10856 }
10857
1dfddc41
MC
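/* If the firmware uses the short command format, or requests can exceed the
 * standard maximum length, a separate DMA buffer is needed to hold the short
 * command request.
 */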
10858 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10859 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
e605db80
DK
10860 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10861 if (rc)
10862 goto init_err_pci_clean;
10863 }
10864
e38287b7
MC
10865 if (BNXT_CHIP_P5(bp))
10866 bp->flags |= BNXT_FLAG_CHIP_P5;
10867
3c2217a6
MC
10868 rc = bnxt_hwrm_func_reset(bp);
10869 if (rc)
10870 goto init_err_pci_clean;
10871
5ac67d8b
RS
10872 bnxt_hwrm_fw_set_time(bp);
10873
c0c050c5
MC
10874 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10875 NETIF_F_TSO | NETIF_F_TSO6 |
10876 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7e13318d 10877 NETIF_F_GSO_IPXIP4 |
152971ee
AD
10878 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10879 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
3e8060fa
PS
10880 NETIF_F_RXCSUM | NETIF_F_GRO;
10881
e38287b7 10882 if (BNXT_SUPPORTS_TPA(bp))
3e8060fa 10883 dev->hw_features |= NETIF_F_LRO;
c0c050c5 10884
c0c050c5
MC
10885 dev->hw_enc_features =
10886 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10887 NETIF_F_TSO | NETIF_F_TSO6 |
10888 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
152971ee 10889 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7e13318d 10890 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
152971ee
AD
10891 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
10892 NETIF_F_GSO_GRE_CSUM;
c0c050c5
MC
10893 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
10894 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
10895 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
e38287b7 10896 if (BNXT_SUPPORTS_TPA(bp))
1054aee8 10897 dev->hw_features |= NETIF_F_GRO_HW;
c0c050c5 10898 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
1054aee8
MC
10899 if (dev->features & NETIF_F_GRO_HW)
10900 dev->features &= ~NETIF_F_LRO;
c0c050c5
MC
10901 dev->priv_flags |= IFF_UNICAST_FLT;
10902
10903#ifdef CONFIG_BNXT_SRIOV
10904 init_waitqueue_head(&bp->sriov_cfg_wait);
4ab0c6a8 10905 mutex_init(&bp->sriov_lock);
c0c050c5 10906#endif
e38287b7
MC
10907 if (BNXT_SUPPORTS_TPA(bp)) {
10908 bp->gro_func = bnxt_gro_func_5730x;
67912c36 10909 if (BNXT_CHIP_P4(bp))
e38287b7 10910 bp->gro_func = bnxt_gro_func_5731x;
67912c36
MC
10911 else if (BNXT_CHIP_P5(bp))
10912 bp->gro_func = bnxt_gro_func_5750x;
e38287b7
MC
10913 }
10914 if (!BNXT_CHIP_P4_PLUS(bp))
434c975a 10915 bp->flags |= BNXT_FLAG_DOUBLE_DB;
309369c9 10916
c0c050c5
MC
10917 rc = bnxt_hwrm_func_drv_rgtr(bp);
10918 if (rc)
17086399 10919 goto init_err_pci_clean;
c0c050c5 10920
a1653b13
MC
10921 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
10922 if (rc)
17086399 10923 goto init_err_pci_clean;
a1653b13 10924
a588e458
MC
10925 bp->ulp_probe = bnxt_ulp_probe;
10926
98f04cf0
MC
10927 rc = bnxt_hwrm_queue_qportcfg(bp);
10928 if (rc) {
10929 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
10930 rc);
10931 rc = -1;
10932 goto init_err_pci_clean;
10933 }
c0c050c5
MC
10934 /* Get the MAX capabilities for this function */
10935 rc = bnxt_hwrm_func_qcaps(bp);
10936 if (rc) {
10937 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10938 rc);
10939 rc = -1;
17086399 10940 goto init_err_pci_clean;
c0c050c5 10941 }
e969ae5b
MC
10942
10943 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
10944 if (rc)
10945 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
10946 rc);
10947
a22a6ac2
MC
10948 rc = bnxt_init_mac_addr(bp);
10949 if (rc) {
10950 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
10951 rc = -EADDRNOTAVAIL;
10952 goto init_err_pci_clean;
10953 }
c0c050c5 10954
2e9217d1
VV
10955 if (BNXT_PF(bp)) {
10956 /* Read the adapter's DSN to use as the eswitch switch_id */
10957 rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
10958 if (rc)
10959 goto init_err_pci_clean;
10960 }
567b2abe 10961 bnxt_hwrm_func_qcfg(bp);
6ba99038 10962 bnxt_hwrm_vnic_qcaps(bp);
5ad2cbee 10963 bnxt_hwrm_port_led_qcaps(bp);
eb513658 10964 bnxt_ethtool_init(bp);
87fe6032 10965 bnxt_dcb_init(bp);
567b2abe 10966
7eb9bb3a
MC
10967 /* MTU range: 60 - FW defined max */
10968 dev->min_mtu = ETH_ZLEN;
10969 dev->max_mtu = bp->max_mtu;
10970
d5430d31
MC
10971 rc = bnxt_probe_phy(bp);
10972 if (rc)
10973 goto init_err_pci_clean;
10974
c61fb99c 10975 bnxt_set_rx_skb_mode(bp, false);
c0c050c5
MC
10976 bnxt_set_tpa_flags(bp);
10977 bnxt_set_ring_params(bp);
702c221c 10978 rc = bnxt_set_dflt_rings(bp, true);
bdbd1eb5
MC
10979 if (rc) {
10980 netdev_err(bp->dev, "Not enough rings available.\n");
10981 rc = -ENOMEM;
17086399 10982 goto init_err_pci_clean;
bdbd1eb5 10983 }
c0c050c5 10984
87da7f79
MC
10985 /* Default RSS hash cfg. */
10986 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10987 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10988 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10989 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
e38287b7 10990 if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
87da7f79
MC
10991 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10992 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10993 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10994 }
10995
8079e8f1 10996 if (bnxt_rfs_supported(bp)) {
2bcfa6f6
MC
10997 dev->hw_features |= NETIF_F_NTUPLE;
10998 if (bnxt_rfs_capable(bp)) {
10999 bp->flags |= BNXT_FLAG_RFS;
11000 dev->features |= NETIF_F_NTUPLE;
11001 }
11002 }
11003
c0c050c5
MC
11004 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
11005 bp->flags |= BNXT_FLAG_STRIP_VLAN;
11006
7809592d 11007 rc = bnxt_init_int_mode(bp);
c0c050c5 11008 if (rc)
17086399 11009 goto init_err_pci_clean;
c0c050c5 11010
832aed16
MC
11011 /* No TC has been set yet and rings may have been trimmed due to
11012 * limited MSIX, so we re-initialize the TX rings per TC.
11013 */
11014 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11015
c1ef146a 11016 bnxt_get_wol_settings(bp);
d196ece7
MC
11017 if (bp->flags & BNXT_FLAG_WOL_CAP)
11018 device_set_wakeup_enable(&pdev->dev, bp->wol);
11019 else
11020 device_set_wakeup_capable(&pdev->dev, false);
c1ef146a 11021
c3480a60
MC
11022 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11023
74706afa
MC
11024 bnxt_hwrm_coal_params_qcaps(bp);
11025
c213eae8
MC
11026 if (BNXT_PF(bp)) {
11027 if (!bnxt_pf_wq) {
11028 bnxt_pf_wq =
11029 create_singlethread_workqueue("bnxt_pf_wq");
11030 if (!bnxt_pf_wq) {
11031 dev_err(&pdev->dev, "Unable to create workqueue.\n");
11032 goto init_err_pci_clean;
11033 }
11034 }
2ae7408f 11035 bnxt_init_tc(bp);
c213eae8 11036 }
2ae7408f 11037
7809592d
MC
11038 rc = register_netdev(dev);
11039 if (rc)
2ae7408f 11040 goto init_err_cleanup_tc;
7809592d 11041
4ab0c6a8
SP
11042 if (BNXT_PF(bp))
11043 bnxt_dl_register(bp);
11044
c0c050c5
MC
11045 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
11046 board_info[ent->driver_data].name,
11047 (long)pci_resource_start(pdev, 0), dev->dev_addr);
af125b75 11048 pcie_print_link_status(pdev);
90c4f788 11049
c0c050c5
MC
11050 return 0;
11051
2ae7408f
SP
11052init_err_cleanup_tc:
11053 bnxt_shutdown_tc(bp);
7809592d
MC
11054 bnxt_clear_int_mode(bp);
11055
17086399 11056init_err_pci_clean:
f9099d61 11057 bnxt_free_hwrm_short_cmd_req(bp);
a2bf74f4 11058 bnxt_free_hwrm_resources(bp);
98f04cf0
MC
11059 bnxt_free_ctx_mem(bp);
11060 kfree(bp->ctx);
11061 bp->ctx = NULL;
17086399 11062 bnxt_cleanup_pci(bp);
c0c050c5
MC
11063
11064init_err_free:
11065 free_netdev(dev);
11066 return rc;
11067}
11068
d196ece7
MC
11069static void bnxt_shutdown(struct pci_dev *pdev)
11070{
11071 struct net_device *dev = pci_get_drvdata(pdev);
11072 struct bnxt *bp;
11073
11074 if (!dev)
11075 return;
11076
11077 rtnl_lock();
11078 bp = netdev_priv(dev);
11079 if (!bp)
11080 goto shutdown_exit;
11081
11082 if (netif_running(dev))
11083 dev_close(dev);
11084
a7f3f939
RJ
11085 bnxt_ulp_shutdown(bp);
11086
d196ece7
MC
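/* On power-off, arm wake-on-LAN if it is enabled and drop the device
 * into D3hot.
 */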
11087 if (system_state == SYSTEM_POWER_OFF) {
11088 bnxt_clear_int_mode(bp);
c20dc142 11089 pci_disable_device(pdev);
d196ece7
MC
11090 pci_wake_from_d3(pdev, bp->wol);
11091 pci_set_power_state(pdev, PCI_D3hot);
11092 }
11093
11094shutdown_exit:
11095 rtnl_unlock();
11096}
11097
f65a2044
MC
11098#ifdef CONFIG_PM_SLEEP
11099static int bnxt_suspend(struct device *device)
11100{
f521eaa9 11101 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
11102 struct bnxt *bp = netdev_priv(dev);
11103 int rc = 0;
11104
11105 rtnl_lock();
11106 if (netif_running(dev)) {
11107 netif_device_detach(dev);
11108 rc = bnxt_close(dev);
11109 }
11110 bnxt_hwrm_func_drv_unrgtr(bp);
11111 rtnl_unlock();
11112 return rc;
11113}
11114
11115static int bnxt_resume(struct device *device)
11116{
f521eaa9 11117 struct net_device *dev = dev_get_drvdata(device);
f65a2044
MC
11118 struct bnxt *bp = netdev_priv(dev);
11119 int rc = 0;
11120
11121 rtnl_lock();
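/* The driver was unregistered from the firmware during suspend, so
 * re-register and reset the function before reopening the device.
 */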
11122 if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
11123 rc = -ENODEV;
11124 goto resume_exit;
11125 }
11126 rc = bnxt_hwrm_func_reset(bp);
11127 if (rc) {
11128 rc = -EBUSY;
11129 goto resume_exit;
11130 }
11131 bnxt_get_wol_settings(bp);
11132 if (netif_running(dev)) {
11133 rc = bnxt_open(dev);
11134 if (!rc)
11135 netif_device_attach(dev);
11136 }
11137
11138resume_exit:
11139 rtnl_unlock();
11140 return rc;
11141}
11142
11143static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
11144#define BNXT_PM_OPS (&bnxt_pm_ops)
11145
11146#else
11147
11148#define BNXT_PM_OPS NULL
11149
11150#endif /* CONFIG_PM_SLEEP */
11151
6316ea6d
SB
11152/**
11153 * bnxt_io_error_detected - called when PCI error is detected
11154 * @pdev: Pointer to PCI device
11155 * @state: The current pci connection state
11156 *
11157 * This function is called after a PCI bus error affecting
11158 * this device has been detected.
11159 */
11160static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
11161 pci_channel_state_t state)
11162{
11163 struct net_device *netdev = pci_get_drvdata(pdev);
a588e458 11164 struct bnxt *bp = netdev_priv(netdev);
6316ea6d
SB
11165
11166 netdev_info(netdev, "PCI I/O error detected\n");
11167
11168 rtnl_lock();
11169 netif_device_detach(netdev);
11170
a588e458
MC
11171 bnxt_ulp_stop(bp);
11172
6316ea6d
SB
11173 if (state == pci_channel_io_perm_failure) {
11174 rtnl_unlock();
11175 return PCI_ERS_RESULT_DISCONNECT;
11176 }
11177
11178 if (netif_running(netdev))
11179 bnxt_close(netdev);
11180
11181 pci_disable_device(pdev);
11182 rtnl_unlock();
11183
11184 /* Request a slot reset. */
11185 return PCI_ERS_RESULT_NEED_RESET;
11186}
11187
11188/**
11189 * bnxt_io_slot_reset - called after the pci bus has been reset.
11190 * @pdev: Pointer to PCI device
11191 *
11192 * Restart the card from scratch, as if from a cold-boot.
11193 * At this point, the card has experienced a hard reset,
11194 * followed by fixups by BIOS, and has its config space
11195 * set up identically to what it was at cold boot.
11196 */
11197static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
11198{
11199 struct net_device *netdev = pci_get_drvdata(pdev);
11200 struct bnxt *bp = netdev_priv(netdev);
11201 int err = 0;
11202 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
11203
11204 netdev_info(bp->dev, "PCI Slot Reset\n");
11205
11206 rtnl_lock();
11207
11208 if (pci_enable_device(pdev)) {
11209 dev_err(&pdev->dev,
11210 "Cannot re-enable PCI device after reset.\n");
11211 } else {
11212 pci_set_master(pdev);
11213
aa8ed021
MC
11214 err = bnxt_hwrm_func_reset(bp);
11215 if (!err && netif_running(netdev))
6316ea6d
SB
11216 err = bnxt_open(netdev);
11217
a588e458 11218 if (!err) {
6316ea6d 11219 result = PCI_ERS_RESULT_RECOVERED;
a588e458
MC
11220 bnxt_ulp_start(bp);
11221 }
6316ea6d
SB
11222 }
11223
11224 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
11225 dev_close(netdev);
11226
11227 rtnl_unlock();
11228
6316ea6d
SB
11229 return PCI_ERS_RESULT_RECOVERED;
11230}
11231
11232/**
11233 * bnxt_io_resume - called when traffic can start flowing again.
11234 * @pdev: Pointer to PCI device
11235 *
11236 * This callback is called when the error recovery driver tells
11237 * us that it's OK to resume normal operation.
11238 */
11239static void bnxt_io_resume(struct pci_dev *pdev)
11240{
11241 struct net_device *netdev = pci_get_drvdata(pdev);
11242
11243 rtnl_lock();
11244
11245 netif_device_attach(netdev);
11246
11247 rtnl_unlock();
11248}
11249
11250static const struct pci_error_handlers bnxt_err_handler = {
11251 .error_detected = bnxt_io_error_detected,
11252 .slot_reset = bnxt_io_slot_reset,
11253 .resume = bnxt_io_resume
11254};
11255
c0c050c5
MC
11256static struct pci_driver bnxt_pci_driver = {
11257 .name = DRV_MODULE_NAME,
11258 .id_table = bnxt_pci_tbl,
11259 .probe = bnxt_init_one,
11260 .remove = bnxt_remove_one,
d196ece7 11261 .shutdown = bnxt_shutdown,
f65a2044 11262 .driver.pm = BNXT_PM_OPS,
6316ea6d 11263 .err_handler = &bnxt_err_handler,
c0c050c5
MC
11264#if defined(CONFIG_BNXT_SRIOV)
11265 .sriov_configure = bnxt_sriov_configure,
11266#endif
11267};
11268
c213eae8
MC
11269static int __init bnxt_init(void)
11270{
cabfb09d 11271 bnxt_debug_init();
c213eae8
MC
11272 return pci_register_driver(&bnxt_pci_driver);
11273}
11274
11275static void __exit bnxt_exit(void)
11276{
11277 pci_unregister_driver(&bnxt_pci_driver);
11278 if (bnxt_pf_wq)
11279 destroy_workqueue(bnxt_pf_wq);
cabfb09d 11280 bnxt_debug_exit();
c213eae8
MC
11281}
11282
11283module_init(bnxt_init);
11284module_exit(bnxt_exit);