/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
	NETXTREME_E_P5_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
	writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

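/* Doorbell helpers: P5 (57500-series) chips use the 64-bit NQ doorbell
 * format written with writeq(), while older chips use the legacy 32-bit
 * CQ doorbell format.
 */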
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

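/* TX length hint lookup table, indexed by packet length in 512-byte units. */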
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

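	/* If the TX ring is otherwise empty and the packet is small enough,
	 * push the BDs and packet data directly through the doorbell BAR
	 * instead of going through the normal DMA path.
	 */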
	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (skb->xmit_more && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

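/* Reclaim completed TX descriptors: unmap DMA, free the skbs, and wake the
 * TX queue if it was stopped and enough descriptors are now available.
 */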
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		__free_page(page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

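/* Allocate and map one RX buffer and program it into the RX ring at @prod.
 * In page mode a full page is used; otherwise a kmalloc'ed data buffer is
 * used.
 */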
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

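/* Build an skb for a page-mode RX buffer: the packet headers are copied
 * into the skb head and the remainder of the page is attached as a
 * fragment.
 */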
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	struct skb_frag_struct *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);

	if (unlikely(!payload))
		payload = eth_get_headlen(data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	frag->page_offset += payload;
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

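/* Attach the aggregation buffers of a jumbo/TPA completion to @skb as page
 * fragments, replenishing the aggregation ring as we go.
 */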
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

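/* Copy a small received packet into a freshly allocated skb so that the
 * original RX buffer can be reused in place.
 */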
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

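/* Schedule the slow-path task.  The PF uses the driver's dedicated
 * workqueue; VFs use the system workqueue.
 */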
static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_cancel_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		flush_workqueue(bnxt_pf_wq);
	else
		cancel_work_sync(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

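/* Handle a TPA_START completion: park the current RX buffer in the TPA
 * info slot for this aggregation ID and refill the RX ring entries so the
 * aggregated packet can be completed later by bnxt_tpa_end().
 */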
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
			   u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
}

static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data_ptr, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	void *data;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*event |= BNXT_AGG_EVENT;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}

	skb->protocol =
		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

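/* Hand a completed skb to the stack, either via the VF representor or
 * through GRO on the PF's own netdev.
 */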
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
			     struct sk_buff *skb)
{
	if (skb->dev != bp->dev) {
		/* this packet belongs to a vf-rep */
		bnxt_vf_rep_rx(bp, skb);
		return;
	}
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
		       u32 *raw_cons, u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data_ptr, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *data;
	int rc = 0;
	u32 misc;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		if (IS_ERR(skb))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnxt_deliver_skb(bp, bnapi, skb);
			rc = 1;
		}
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);

		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	}
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
		rc = 1;
		goto next_rx;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		u32 payload;

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		else
			payload = 0;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
				      payload | len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				bnapi->cp_ring.rx_l4_csum_errors++;
		}
	}

	bnxt_deliver_skb(bp, bnapi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

	cpr->rx_packets += 1;
	cpr->rx_bytes += len;

next_rx_no_prod_no_len:
	*raw_cons = tmp_raw_cons;

	return rc;
}

2270bc5d
MC
1701/* In netpoll mode, if we are using a combined completion ring, we need to
1702 * discard the rx packets and recycle the buffers.
1703 */
e44758b7
MC
1704static int bnxt_force_rx_discard(struct bnxt *bp,
1705 struct bnxt_cp_ring_info *cpr,
2270bc5d
MC
1706 u32 *raw_cons, u8 *event)
1707{
2270bc5d
MC
1708 u32 tmp_raw_cons = *raw_cons;
1709 struct rx_cmp_ext *rxcmp1;
1710 struct rx_cmp *rxcmp;
1711 u16 cp_cons;
1712 u8 cmp_type;
1713
1714 cp_cons = RING_CMP(tmp_raw_cons);
1715 rxcmp = (struct rx_cmp *)
1716 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1717
1718 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1719 cp_cons = RING_CMP(tmp_raw_cons);
1720 rxcmp1 = (struct rx_cmp_ext *)
1721 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1722
1723 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1724 return -EBUSY;
1725
1726 cmp_type = RX_CMP_TYPE(rxcmp);
1727 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1728 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1729 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1730 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1731 struct rx_tpa_end_cmp_ext *tpa_end1;
1732
1733 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1734 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1735 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1736 }
e44758b7 1737 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
2270bc5d
MC
1738}
1739
4bb13abf 1740#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
1741 ((data) & \
1742 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 1743
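/* Handle one firmware asynchronous event completion.  Recognized events set
 * the matching bit in bp->sp_event and kick the slow-path workqueue; all
 * events, recognized or not, are also forwarded to the ULP driver.
 */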
c0c050c5
MC
1744static int bnxt_async_event_process(struct bnxt *bp,
1745 struct hwrm_async_event_cmpl *cmpl)
1746{
1747 u16 event_id = le16_to_cpu(cmpl->event_id);
1748
 1749 /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1750 switch (event_id) {
87c374de 1751 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
8cbde117
MC
1752 u32 data1 = le32_to_cpu(cmpl->event_data1);
1753 struct bnxt_link_info *link_info = &bp->link_info;
1754
1755 if (BNXT_VF(bp))
1756 goto async_event_process_exit;
a8168b6c
MC
1757
1758 /* print unsupported speed warning in forced speed mode only */
1759 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1760 (data1 & 0x20000)) {
8cbde117
MC
1761 u16 fw_speed = link_info->force_link_speed;
1762 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1763
a8168b6c
MC
1764 if (speed != SPEED_UNKNOWN)
1765 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1766 speed);
8cbde117 1767 }
286ef9d6 1768 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
8cbde117 1769 }
bc171e87 1770 /* fall through */
87c374de 1771 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
c0c050c5 1772 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
19241368 1773 break;
87c374de 1774 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
19241368 1775 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
c0c050c5 1776 break;
87c374de 1777 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
4bb13abf
MC
1778 u32 data1 = le32_to_cpu(cmpl->event_data1);
1779 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1780
1781 if (BNXT_VF(bp))
1782 break;
1783
1784 if (bp->pf.port_id != port_id)
1785 break;
1786
4bb13abf
MC
1787 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1788 break;
1789 }
87c374de 1790 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
fc0f1929
MC
1791 if (BNXT_PF(bp))
1792 goto async_event_process_exit;
1793 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1794 break;
c0c050c5 1795 default:
19241368 1796 goto async_event_process_exit;
c0c050c5 1797 }
c213eae8 1798 bnxt_queue_sp_work(bp);
19241368 1799async_event_process_exit:
a588e458 1800 bnxt_ulp_async_events(bp, cmpl);
c0c050c5
MC
1801 return 0;
1802}
1803
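/* Dispatch HWRM-related completions found on the completion ring: HWRM_DONE
 * acks the pending sequence id, forwarded VF requests are flagged for the PF
 * slow path, and async event completions go to bnxt_async_event_process().
 */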
1804static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1805{
1806 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1807 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1808 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1809 (struct hwrm_fwd_req_cmpl *)txcmp;
1810
1811 switch (cmpl_type) {
1812 case CMPL_BASE_TYPE_HWRM_DONE:
1813 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1814 if (seq_id == bp->hwrm_intr_seq_id)
1815 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1816 else
1817 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1818 break;
1819
1820 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1821 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1822
1823 if ((vf_id < bp->pf.first_vf_id) ||
1824 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1825 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1826 vf_id);
1827 return -EINVAL;
1828 }
1829
1830 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1831 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
c213eae8 1832 bnxt_queue_sp_work(bp);
c0c050c5
MC
1833 break;
1834
1835 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1836 bnxt_async_event_process(bp,
1837 (struct hwrm_async_event_cmpl *)txcmp);
 1838 /* fall through */
1839 default:
1840 break;
1841 }
1842
1843 return 0;
1844}
1845
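/* MSI-X interrupt handler: bump the per-ring event counter (sampled later for
 * dynamic interrupt moderation) and schedule NAPI.
 */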
1846static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1847{
1848 struct bnxt_napi *bnapi = dev_instance;
1849 struct bnxt *bp = bnapi->bp;
1850 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1851 u32 cons = RING_CMP(cpr->cp_raw_cons);
1852
6a8788f2 1853 cpr->event_ctr++;
c0c050c5
MC
1854 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1855 napi_schedule(&bnapi->napi);
1856 return IRQ_HANDLED;
1857}
1858
1859static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1860{
1861 u32 raw_cons = cpr->cp_raw_cons;
1862 u16 cons = RING_CMP(raw_cons);
1863 struct tx_cmp *txcmp;
1864
1865 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1866
1867 return TX_CMP_VALID(txcmp, raw_cons);
1868}
1869
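/* Legacy INTx handler: the line may be shared, so when no work is pending
 * check the legacy interrupt status register and return IRQ_NONE if the
 * interrupt is not ours, before disabling the ring IRQ and scheduling NAPI.
 */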
c0c050c5
MC
1870static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1871{
1872 struct bnxt_napi *bnapi = dev_instance;
1873 struct bnxt *bp = bnapi->bp;
1874 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1875 u32 cons = RING_CMP(cpr->cp_raw_cons);
1876 u32 int_status;
1877
1878 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1879
1880 if (!bnxt_has_work(bp, cpr)) {
11809490 1881 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
c0c050c5
MC
1882 /* return if erroneous interrupt */
1883 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1884 return IRQ_NONE;
1885 }
1886
1887 /* disable ring IRQ */
697197e5 1888 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
c0c050c5
MC
1889
1890 /* Return here if interrupt is shared and is disabled. */
1891 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1892 return IRQ_HANDLED;
1893
1894 napi_schedule(&bnapi->napi);
1895 return IRQ_HANDLED;
1896}
1897
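/* Core completion-ring poll loop shared by the NAPI handlers: consume TX, RX
 * and HWRM completions up to the budget.  The TX doorbell is written here
 * when needed; CQ and RX/agg doorbells are deferred to the callers and to
 * __bnxt_poll_work_done().
 */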
3675b92f
MC
1898static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1899 int budget)
c0c050c5 1900{
e44758b7 1901 struct bnxt_napi *bnapi = cpr->bnapi;
c0c050c5
MC
1902 u32 raw_cons = cpr->cp_raw_cons;
1903 u32 cons;
1904 int tx_pkts = 0;
1905 int rx_pkts = 0;
4e5dbbda 1906 u8 event = 0;
c0c050c5
MC
1907 struct tx_cmp *txcmp;
1908
0fcec985 1909 cpr->has_more_work = 0;
c0c050c5
MC
1910 while (1) {
1911 int rc;
1912
1913 cons = RING_CMP(raw_cons);
1914 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1915
1916 if (!TX_CMP_VALID(txcmp, raw_cons))
1917 break;
1918
67a95e20
MC
1919 /* The valid test of the entry must be done first before
1920 * reading any further.
1921 */
b67daab0 1922 dma_rmb();
3675b92f 1923 cpr->had_work_done = 1;
c0c050c5
MC
1924 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1925 tx_pkts++;
1926 /* return full budget so NAPI will complete. */
73f21c65 1927 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
c0c050c5 1928 rx_pkts = budget;
73f21c65 1929 raw_cons = NEXT_RAW_CMP(raw_cons);
0fcec985
MC
1930 if (budget)
1931 cpr->has_more_work = 1;
73f21c65
MC
1932 break;
1933 }
c0c050c5 1934 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2270bc5d 1935 if (likely(budget))
e44758b7 1936 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2270bc5d 1937 else
e44758b7 1938 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2270bc5d 1939 &event);
c0c050c5
MC
1940 if (likely(rc >= 0))
1941 rx_pkts += rc;
903649e7
MC
1942 /* Increment rx_pkts when rc is -ENOMEM to count towards
1943 * the NAPI budget. Otherwise, we may potentially loop
1944 * here forever if we consistently cannot allocate
1945 * buffers.
1946 */
2edbdb31 1947 else if (rc == -ENOMEM && budget)
903649e7 1948 rx_pkts++;
c0c050c5
MC
1949 else if (rc == -EBUSY) /* partial completion */
1950 break;
c0c050c5
MC
1951 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1952 CMPL_BASE_TYPE_HWRM_DONE) ||
1953 (TX_CMP_TYPE(txcmp) ==
1954 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1955 (TX_CMP_TYPE(txcmp) ==
1956 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1957 bnxt_hwrm_handler(bp, txcmp);
1958 }
1959 raw_cons = NEXT_RAW_CMP(raw_cons);
1960
0fcec985
MC
1961 if (rx_pkts && rx_pkts == budget) {
1962 cpr->has_more_work = 1;
c0c050c5 1963 break;
0fcec985 1964 }
c0c050c5
MC
1965 }
1966
38413406
MC
1967 if (event & BNXT_TX_EVENT) {
1968 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
38413406
MC
1969 u16 prod = txr->tx_prod;
1970
1971 /* Sync BD data before updating doorbell */
1972 wmb();
1973
697197e5 1974 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
38413406
MC
1975 }
1976
c0c050c5 1977 cpr->cp_raw_cons = raw_cons;
3675b92f
MC
1978 bnapi->tx_pkts += tx_pkts;
1979 bnapi->events |= event;
1980 return rx_pkts;
1981}
c0c050c5 1982
3675b92f
MC
1983static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
1984{
1985 if (bnapi->tx_pkts) {
1986 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
1987 bnapi->tx_pkts = 0;
1988 }
c0c050c5 1989
3675b92f 1990 if (bnapi->events & BNXT_RX_EVENT) {
b6ab4b01 1991 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 1992
697197e5 1993 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
3675b92f 1994 if (bnapi->events & BNXT_AGG_EVENT)
697197e5 1995 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
c0c050c5 1996 }
3675b92f
MC
1997 bnapi->events = 0;
1998}
1999
2000static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2001 int budget)
2002{
2003 struct bnxt_napi *bnapi = cpr->bnapi;
2004 int rx_pkts;
2005
2006 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2007
2008 /* ACK completion ring before freeing tx ring and producing new
2009 * buffers in rx/agg rings to prevent overflowing the completion
2010 * ring.
2011 */
2012 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2013
2014 __bnxt_poll_work_done(bp, bnapi);
c0c050c5
MC
2015 return rx_pkts;
2016}
2017
10bbdaf5
PS
2018static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2019{
2020 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2021 struct bnxt *bp = bnapi->bp;
2022 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2023 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2024 struct tx_cmp *txcmp;
2025 struct rx_cmp_ext *rxcmp1;
2026 u32 cp_cons, tmp_raw_cons;
2027 u32 raw_cons = cpr->cp_raw_cons;
2028 u32 rx_pkts = 0;
4e5dbbda 2029 u8 event = 0;
10bbdaf5
PS
2030
2031 while (1) {
2032 int rc;
2033
2034 cp_cons = RING_CMP(raw_cons);
2035 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2036
2037 if (!TX_CMP_VALID(txcmp, raw_cons))
2038 break;
2039
2040 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2041 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2042 cp_cons = RING_CMP(tmp_raw_cons);
2043 rxcmp1 = (struct rx_cmp_ext *)
2044 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2045
2046 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2047 break;
2048
2049 /* force an error to recycle the buffer */
2050 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2051 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2052
e44758b7 2053 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2edbdb31 2054 if (likely(rc == -EIO) && budget)
10bbdaf5
PS
2055 rx_pkts++;
2056 else if (rc == -EBUSY) /* partial completion */
2057 break;
2058 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2059 CMPL_BASE_TYPE_HWRM_DONE)) {
2060 bnxt_hwrm_handler(bp, txcmp);
2061 } else {
2062 netdev_err(bp->dev,
2063 "Invalid completion received on special ring\n");
2064 }
2065 raw_cons = NEXT_RAW_CMP(raw_cons);
2066
2067 if (rx_pkts == budget)
2068 break;
2069 }
2070
2071 cpr->cp_raw_cons = raw_cons;
697197e5
MC
2072 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2073 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10bbdaf5 2074
434c975a 2075 if (event & BNXT_AGG_EVENT)
697197e5 2076 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10bbdaf5
PS
2077
2078 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
6ad20165 2079 napi_complete_done(napi, rx_pkts);
697197e5 2080 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
10bbdaf5
PS
2081 }
2082 return rx_pkts;
2083}
2084
c0c050c5
MC
2085static int bnxt_poll(struct napi_struct *napi, int budget)
2086{
2087 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2088 struct bnxt *bp = bnapi->bp;
2089 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2090 int work_done = 0;
2091
c0c050c5 2092 while (1) {
e44758b7 2093 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
c0c050c5 2094
73f21c65
MC
2095 if (work_done >= budget) {
2096 if (!budget)
697197e5 2097 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5 2098 break;
73f21c65 2099 }
c0c050c5
MC
2100
2101 if (!bnxt_has_work(bp, cpr)) {
e7b95691 2102 if (napi_complete_done(napi, work_done))
697197e5 2103 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
c0c050c5
MC
2104 break;
2105 }
2106 }
6a8788f2
AG
2107 if (bp->flags & BNXT_FLAG_DIM) {
2108 struct net_dim_sample dim_sample;
2109
2110 net_dim_sample(cpr->event_ctr,
2111 cpr->rx_packets,
2112 cpr->rx_bytes,
2113 &dim_sample);
2114 net_dim(&cpr->dim, dim_sample);
2115 }
c0c050c5 2116 mmiowb();
c0c050c5
MC
2117 return work_done;
2118}
2119
0fcec985
MC
2120static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2121{
2122 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2123 int i, work_done = 0;
2124
2125 for (i = 0; i < 2; i++) {
2126 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2127
2128 if (cpr2) {
2129 work_done += __bnxt_poll_work(bp, cpr2,
2130 budget - work_done);
2131 cpr->has_more_work |= cpr2->has_more_work;
2132 }
2133 }
2134 return work_done;
2135}
2136
2137static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2138 u64 dbr_type, bool all)
2139{
2140 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2141 int i;
2142
2143 for (i = 0; i < 2; i++) {
2144 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2145 struct bnxt_db_info *db;
2146
2147 if (cpr2 && (all || cpr2->had_work_done)) {
2148 db = &cpr2->cp_db;
2149 writeq(db->db_key64 | dbr_type |
2150 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2151 cpr2->had_work_done = 0;
2152 }
2153 }
2154 __bnxt_poll_work_done(bp, bnapi);
2155}
2156
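/* NAPI poll for 57500 (P5) chips: the NQ carries only notifications, so each
 * CQ_NOTIFICATION entry selects the child completion ring in cp_ring_arr[]
 * that has work, and that ring is then polled with __bnxt_poll_work().
 */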
2157static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2158{
2159 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2160 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2161 u32 raw_cons = cpr->cp_raw_cons;
2162 struct bnxt *bp = bnapi->bp;
2163 struct nqe_cn *nqcmp;
2164 int work_done = 0;
2165 u32 cons;
2166
2167 if (cpr->has_more_work) {
2168 cpr->has_more_work = 0;
2169 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2170 if (cpr->has_more_work) {
2171 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2172 return work_done;
2173 }
2174 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2175 if (napi_complete_done(napi, work_done))
2176 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2177 return work_done;
2178 }
2179 while (1) {
2180 cons = RING_CMP(raw_cons);
2181 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2182
2183 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2184 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2185 false);
2186 cpr->cp_raw_cons = raw_cons;
2187 if (napi_complete_done(napi, work_done))
2188 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2189 cpr->cp_raw_cons);
2190 return work_done;
2191 }
2192
2193 /* The valid test of the entry must be done first before
2194 * reading any further.
2195 */
2196 dma_rmb();
2197
2198 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2199 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2200 struct bnxt_cp_ring_info *cpr2;
2201
2202 cpr2 = cpr->cp_ring_arr[idx];
2203 work_done += __bnxt_poll_work(bp, cpr2,
2204 budget - work_done);
2205 cpr->has_more_work = cpr2->has_more_work;
2206 } else {
2207 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2208 }
2209 raw_cons = NEXT_RAW_CMP(raw_cons);
2210 if (cpr->has_more_work)
2211 break;
2212 }
2213 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2214 cpr->cp_raw_cons = raw_cons;
2215 return work_done;
2216}
2217
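/* Drop every pending TX skb on all rings.  Skbs sent by TX push were copied
 * into the push buffer and carry no DMA mapping; everything else is unmapped
 * (head plus fragments) before being freed.
 */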
c0c050c5
MC
2218static void bnxt_free_tx_skbs(struct bnxt *bp)
2219{
2220 int i, max_idx;
2221 struct pci_dev *pdev = bp->pdev;
2222
b6ab4b01 2223 if (!bp->tx_ring)
c0c050c5
MC
2224 return;
2225
2226 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2227 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2228 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2229 int j;
2230
c0c050c5
MC
2231 for (j = 0; j < max_idx;) {
2232 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2233 struct sk_buff *skb = tx_buf->skb;
2234 int k, last;
2235
2236 if (!skb) {
2237 j++;
2238 continue;
2239 }
2240
2241 tx_buf->skb = NULL;
2242
2243 if (tx_buf->is_push) {
2244 dev_kfree_skb(skb);
2245 j += 2;
2246 continue;
2247 }
2248
2249 dma_unmap_single(&pdev->dev,
2250 dma_unmap_addr(tx_buf, mapping),
2251 skb_headlen(skb),
2252 PCI_DMA_TODEVICE);
2253
2254 last = tx_buf->nr_frags;
2255 j += 2;
d612a579
MC
2256 for (k = 0; k < last; k++, j++) {
2257 int ring_idx = j & bp->tx_ring_mask;
c0c050c5
MC
2258 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2259
d612a579 2260 tx_buf = &txr->tx_buf_ring[ring_idx];
c0c050c5
MC
2261 dma_unmap_page(
2262 &pdev->dev,
2263 dma_unmap_addr(tx_buf, mapping),
2264 skb_frag_size(frag), PCI_DMA_TODEVICE);
2265 }
2266 dev_kfree_skb(skb);
2267 }
2268 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2269 }
2270}
2271
2272static void bnxt_free_rx_skbs(struct bnxt *bp)
2273{
2274 int i, max_idx, max_agg_idx;
2275 struct pci_dev *pdev = bp->pdev;
2276
b6ab4b01 2277 if (!bp->rx_ring)
c0c050c5
MC
2278 return;
2279
2280 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2281 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2282 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2283 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2284 int j;
2285
c0c050c5
MC
2286 if (rxr->rx_tpa) {
2287 for (j = 0; j < MAX_TPA; j++) {
2288 struct bnxt_tpa_info *tpa_info =
2289 &rxr->rx_tpa[j];
2290 u8 *data = tpa_info->data;
2291
2292 if (!data)
2293 continue;
2294
c519fe9a
SN
2295 dma_unmap_single_attrs(&pdev->dev,
2296 tpa_info->mapping,
2297 bp->rx_buf_use_size,
2298 bp->rx_dir,
2299 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
2300
2301 tpa_info->data = NULL;
2302
2303 kfree(data);
2304 }
2305 }
2306
2307 for (j = 0; j < max_idx; j++) {
2308 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
3ed3a83e 2309 dma_addr_t mapping = rx_buf->mapping;
6bb19474 2310 void *data = rx_buf->data;
c0c050c5
MC
2311
2312 if (!data)
2313 continue;
2314
c0c050c5
MC
2315 rx_buf->data = NULL;
2316
3ed3a83e
MC
2317 if (BNXT_RX_PAGE_MODE(bp)) {
2318 mapping -= bp->rx_dma_offset;
c519fe9a
SN
2319 dma_unmap_page_attrs(&pdev->dev, mapping,
2320 PAGE_SIZE, bp->rx_dir,
2321 DMA_ATTR_WEAK_ORDERING);
c61fb99c 2322 __free_page(data);
3ed3a83e 2323 } else {
c519fe9a
SN
2324 dma_unmap_single_attrs(&pdev->dev, mapping,
2325 bp->rx_buf_use_size,
2326 bp->rx_dir,
2327 DMA_ATTR_WEAK_ORDERING);
c61fb99c 2328 kfree(data);
3ed3a83e 2329 }
c0c050c5
MC
2330 }
2331
2332 for (j = 0; j < max_agg_idx; j++) {
2333 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2334 &rxr->rx_agg_ring[j];
2335 struct page *page = rx_agg_buf->page;
2336
2337 if (!page)
2338 continue;
2339
c519fe9a
SN
2340 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2341 BNXT_RX_PAGE_SIZE,
2342 PCI_DMA_FROMDEVICE,
2343 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
2344
2345 rx_agg_buf->page = NULL;
2346 __clear_bit(j, rxr->rx_agg_bmap);
2347
2348 __free_page(page);
2349 }
89d0a06c
MC
2350 if (rxr->rx_page) {
2351 __free_page(rxr->rx_page);
2352 rxr->rx_page = NULL;
2353 }
c0c050c5
MC
2354 }
2355}
2356
2357static void bnxt_free_skbs(struct bnxt *bp)
2358{
2359 bnxt_free_tx_skbs(bp);
2360 bnxt_free_rx_skbs(bp);
2361}
2362
6fe19886 2363static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5
MC
2364{
2365 struct pci_dev *pdev = bp->pdev;
2366 int i;
2367
6fe19886
MC
2368 for (i = 0; i < rmem->nr_pages; i++) {
2369 if (!rmem->pg_arr[i])
c0c050c5
MC
2370 continue;
2371
6fe19886
MC
2372 dma_free_coherent(&pdev->dev, rmem->page_size,
2373 rmem->pg_arr[i], rmem->dma_arr[i]);
c0c050c5 2374
6fe19886 2375 rmem->pg_arr[i] = NULL;
c0c050c5 2376 }
6fe19886
MC
2377 if (rmem->pg_tbl) {
2378 dma_free_coherent(&pdev->dev, rmem->nr_pages * 8,
2379 rmem->pg_tbl, rmem->pg_tbl_map);
2380 rmem->pg_tbl = NULL;
c0c050c5 2381 }
6fe19886
MC
2382 if (rmem->vmem_size && *rmem->vmem) {
2383 vfree(*rmem->vmem);
2384 *rmem->vmem = NULL;
c0c050c5
MC
2385 }
2386}
2387
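/* Allocate the coherent DMA pages backing a ring, an optional page table
 * when the ring spans more than one page (with PTU PTE bits when requested
 * by the flags), and an optional vmalloc'ed shadow array for software state.
 */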
6fe19886 2388static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
c0c050c5 2389{
c0c050c5 2390 struct pci_dev *pdev = bp->pdev;
66cca20a 2391 u64 valid_bit = 0;
6fe19886 2392 int i;
c0c050c5 2393
66cca20a
MC
2394 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2395 valid_bit = PTU_PTE_VALID;
6fe19886
MC
2396 if (rmem->nr_pages > 1) {
2397 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
2398 rmem->nr_pages * 8,
2399 &rmem->pg_tbl_map,
c0c050c5 2400 GFP_KERNEL);
6fe19886 2401 if (!rmem->pg_tbl)
c0c050c5
MC
2402 return -ENOMEM;
2403 }
2404
6fe19886 2405 for (i = 0; i < rmem->nr_pages; i++) {
66cca20a
MC
2406 u64 extra_bits = valid_bit;
2407
6fe19886
MC
2408 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2409 rmem->page_size,
2410 &rmem->dma_arr[i],
c0c050c5 2411 GFP_KERNEL);
6fe19886 2412 if (!rmem->pg_arr[i])
c0c050c5
MC
2413 return -ENOMEM;
2414
66cca20a
MC
2415 if (rmem->nr_pages > 1) {
2416 if (i == rmem->nr_pages - 2 &&
2417 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2418 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2419 else if (i == rmem->nr_pages - 1 &&
2420 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2421 extra_bits |= PTU_PTE_LAST;
2422 rmem->pg_tbl[i] =
2423 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2424 }
c0c050c5
MC
2425 }
2426
6fe19886
MC
2427 if (rmem->vmem_size) {
2428 *rmem->vmem = vzalloc(rmem->vmem_size);
2429 if (!(*rmem->vmem))
c0c050c5
MC
2430 return -ENOMEM;
2431 }
2432 return 0;
2433}
2434
2435static void bnxt_free_rx_rings(struct bnxt *bp)
2436{
2437 int i;
2438
b6ab4b01 2439 if (!bp->rx_ring)
c0c050c5
MC
2440 return;
2441
2442 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2443 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2444 struct bnxt_ring_struct *ring;
2445
c6d30e83
MC
2446 if (rxr->xdp_prog)
2447 bpf_prog_put(rxr->xdp_prog);
2448
96a8604f
JDB
2449 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2450 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2451
c0c050c5
MC
2452 kfree(rxr->rx_tpa);
2453 rxr->rx_tpa = NULL;
2454
2455 kfree(rxr->rx_agg_bmap);
2456 rxr->rx_agg_bmap = NULL;
2457
2458 ring = &rxr->rx_ring_struct;
6fe19886 2459 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2460
2461 ring = &rxr->rx_agg_ring_struct;
6fe19886 2462 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2463 }
2464}
2465
2466static int bnxt_alloc_rx_rings(struct bnxt *bp)
2467{
2468 int i, rc, agg_rings = 0, tpa_rings = 0;
2469
b6ab4b01
MC
2470 if (!bp->rx_ring)
2471 return -ENOMEM;
2472
c0c050c5
MC
2473 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2474 agg_rings = 1;
2475
2476 if (bp->flags & BNXT_FLAG_TPA)
2477 tpa_rings = 1;
2478
2479 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2480 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2481 struct bnxt_ring_struct *ring;
2482
c0c050c5
MC
2483 ring = &rxr->rx_ring_struct;
2484
96a8604f
JDB
2485 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2486 if (rc < 0)
2487 return rc;
2488
6fe19886 2489 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2490 if (rc)
2491 return rc;
2492
2c61d211 2493 ring->grp_idx = i;
c0c050c5
MC
2494 if (agg_rings) {
2495 u16 mem_size;
2496
2497 ring = &rxr->rx_agg_ring_struct;
6fe19886 2498 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2499 if (rc)
2500 return rc;
2501
9899bb59 2502 ring->grp_idx = i;
c0c050c5
MC
2503 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2504 mem_size = rxr->rx_agg_bmap_size / 8;
2505 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2506 if (!rxr->rx_agg_bmap)
2507 return -ENOMEM;
2508
2509 if (tpa_rings) {
2510 rxr->rx_tpa = kcalloc(MAX_TPA,
2511 sizeof(struct bnxt_tpa_info),
2512 GFP_KERNEL);
2513 if (!rxr->rx_tpa)
2514 return -ENOMEM;
2515 }
2516 }
2517 }
2518 return 0;
2519}
2520
2521static void bnxt_free_tx_rings(struct bnxt *bp)
2522{
2523 int i;
2524 struct pci_dev *pdev = bp->pdev;
2525
b6ab4b01 2526 if (!bp->tx_ring)
c0c050c5
MC
2527 return;
2528
2529 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2530 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2531 struct bnxt_ring_struct *ring;
2532
c0c050c5
MC
2533 if (txr->tx_push) {
2534 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2535 txr->tx_push, txr->tx_push_mapping);
2536 txr->tx_push = NULL;
2537 }
2538
2539 ring = &txr->tx_ring_struct;
2540
6fe19886 2541 bnxt_free_ring(bp, &ring->ring_mem);
c0c050c5
MC
2542 }
2543}
2544
2545static int bnxt_alloc_tx_rings(struct bnxt *bp)
2546{
2547 int i, j, rc;
2548 struct pci_dev *pdev = bp->pdev;
2549
2550 bp->tx_push_size = 0;
2551 if (bp->tx_push_thresh) {
2552 int push_size;
2553
2554 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2555 bp->tx_push_thresh);
2556
4419dbe6 2557 if (push_size > 256) {
c0c050c5
MC
2558 push_size = 0;
2559 bp->tx_push_thresh = 0;
2560 }
2561
2562 bp->tx_push_size = push_size;
2563 }
2564
2565 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2566 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5 2567 struct bnxt_ring_struct *ring;
2e8ef77e 2568 u8 qidx;
c0c050c5 2569
c0c050c5
MC
2570 ring = &txr->tx_ring_struct;
2571
6fe19886 2572 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2573 if (rc)
2574 return rc;
2575
9899bb59 2576 ring->grp_idx = txr->bnapi->index;
c0c050c5 2577 if (bp->tx_push_size) {
c0c050c5
MC
2578 dma_addr_t mapping;
2579
 2580 /* One pre-allocated DMA buffer to back up
2581 * TX push operation
2582 */
2583 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2584 bp->tx_push_size,
2585 &txr->tx_push_mapping,
2586 GFP_KERNEL);
2587
2588 if (!txr->tx_push)
2589 return -ENOMEM;
2590
c0c050c5
MC
2591 mapping = txr->tx_push_mapping +
2592 sizeof(struct tx_push_bd);
4419dbe6 2593 txr->data_mapping = cpu_to_le64(mapping);
c0c050c5 2594
4419dbe6 2595 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
c0c050c5 2596 }
2e8ef77e
MC
2597 qidx = bp->tc_to_qidx[j];
2598 ring->queue_id = bp->q_info[qidx].queue_id;
5f449249
MC
2599 if (i < bp->tx_nr_rings_xdp)
2600 continue;
c0c050c5
MC
2601 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2602 j++;
2603 }
2604 return 0;
2605}
2606
2607static void bnxt_free_cp_rings(struct bnxt *bp)
2608{
2609 int i;
2610
2611 if (!bp->bnapi)
2612 return;
2613
2614 for (i = 0; i < bp->cp_nr_rings; i++) {
2615 struct bnxt_napi *bnapi = bp->bnapi[i];
2616 struct bnxt_cp_ring_info *cpr;
2617 struct bnxt_ring_struct *ring;
50e3ab78 2618 int j;
c0c050c5
MC
2619
2620 if (!bnapi)
2621 continue;
2622
2623 cpr = &bnapi->cp_ring;
2624 ring = &cpr->cp_ring_struct;
2625
6fe19886 2626 bnxt_free_ring(bp, &ring->ring_mem);
50e3ab78
MC
2627
2628 for (j = 0; j < 2; j++) {
2629 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2630
2631 if (cpr2) {
2632 ring = &cpr2->cp_ring_struct;
2633 bnxt_free_ring(bp, &ring->ring_mem);
2634 kfree(cpr2);
2635 cpr->cp_ring_arr[j] = NULL;
2636 }
2637 }
c0c050c5
MC
2638 }
2639}
2640
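/* Allocate one child completion ring; on P5 chips each NQ can own up to two
 * of these (one for RX, one for TX), tracked in cp_ring_arr[].
 */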
50e3ab78
MC
2641static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2642{
2643 struct bnxt_ring_mem_info *rmem;
2644 struct bnxt_ring_struct *ring;
2645 struct bnxt_cp_ring_info *cpr;
2646 int rc;
2647
2648 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
2649 if (!cpr)
2650 return NULL;
2651
2652 ring = &cpr->cp_ring_struct;
2653 rmem = &ring->ring_mem;
2654 rmem->nr_pages = bp->cp_nr_pages;
2655 rmem->page_size = HW_CMPD_RING_SIZE;
2656 rmem->pg_arr = (void **)cpr->cp_desc_ring;
2657 rmem->dma_arr = cpr->cp_desc_mapping;
2658 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
2659 rc = bnxt_alloc_ring(bp, rmem);
2660 if (rc) {
2661 bnxt_free_ring(bp, rmem);
2662 kfree(cpr);
2663 cpr = NULL;
2664 }
2665 return cpr;
2666}
2667
c0c050c5
MC
2668static int bnxt_alloc_cp_rings(struct bnxt *bp)
2669{
50e3ab78 2670 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
e5811b8c 2671 int i, rc, ulp_base_vec, ulp_msix;
c0c050c5 2672
e5811b8c
MC
2673 ulp_msix = bnxt_get_ulp_msix_num(bp);
2674 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
c0c050c5
MC
2675 for (i = 0; i < bp->cp_nr_rings; i++) {
2676 struct bnxt_napi *bnapi = bp->bnapi[i];
2677 struct bnxt_cp_ring_info *cpr;
2678 struct bnxt_ring_struct *ring;
2679
2680 if (!bnapi)
2681 continue;
2682
2683 cpr = &bnapi->cp_ring;
50e3ab78 2684 cpr->bnapi = bnapi;
c0c050c5
MC
2685 ring = &cpr->cp_ring_struct;
2686
6fe19886 2687 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
c0c050c5
MC
2688 if (rc)
2689 return rc;
e5811b8c
MC
2690
2691 if (ulp_msix && i >= ulp_base_vec)
2692 ring->map_idx = i + ulp_msix;
2693 else
2694 ring->map_idx = i;
50e3ab78
MC
2695
2696 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2697 continue;
2698
2699 if (i < bp->rx_nr_rings) {
2700 struct bnxt_cp_ring_info *cpr2 =
2701 bnxt_alloc_cp_sub_ring(bp);
2702
2703 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
2704 if (!cpr2)
2705 return -ENOMEM;
2706 cpr2->bnapi = bnapi;
2707 }
2708 if ((sh && i < bp->tx_nr_rings) ||
2709 (!sh && i >= bp->rx_nr_rings)) {
2710 struct bnxt_cp_ring_info *cpr2 =
2711 bnxt_alloc_cp_sub_ring(bp);
2712
2713 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
2714 if (!cpr2)
2715 return -ENOMEM;
2716 cpr2->bnapi = bnapi;
2717 }
c0c050c5
MC
2718 }
2719 return 0;
2720}
2721
2722static void bnxt_init_ring_struct(struct bnxt *bp)
2723{
2724 int i;
2725
2726 for (i = 0; i < bp->cp_nr_rings; i++) {
2727 struct bnxt_napi *bnapi = bp->bnapi[i];
6fe19886 2728 struct bnxt_ring_mem_info *rmem;
c0c050c5
MC
2729 struct bnxt_cp_ring_info *cpr;
2730 struct bnxt_rx_ring_info *rxr;
2731 struct bnxt_tx_ring_info *txr;
2732 struct bnxt_ring_struct *ring;
2733
2734 if (!bnapi)
2735 continue;
2736
2737 cpr = &bnapi->cp_ring;
2738 ring = &cpr->cp_ring_struct;
6fe19886
MC
2739 rmem = &ring->ring_mem;
2740 rmem->nr_pages = bp->cp_nr_pages;
2741 rmem->page_size = HW_CMPD_RING_SIZE;
2742 rmem->pg_arr = (void **)cpr->cp_desc_ring;
2743 rmem->dma_arr = cpr->cp_desc_mapping;
2744 rmem->vmem_size = 0;
c0c050c5 2745
b6ab4b01 2746 rxr = bnapi->rx_ring;
3b2b7d9d
MC
2747 if (!rxr)
2748 goto skip_rx;
2749
c0c050c5 2750 ring = &rxr->rx_ring_struct;
6fe19886
MC
2751 rmem = &ring->ring_mem;
2752 rmem->nr_pages = bp->rx_nr_pages;
2753 rmem->page_size = HW_RXBD_RING_SIZE;
2754 rmem->pg_arr = (void **)rxr->rx_desc_ring;
2755 rmem->dma_arr = rxr->rx_desc_mapping;
2756 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2757 rmem->vmem = (void **)&rxr->rx_buf_ring;
c0c050c5
MC
2758
2759 ring = &rxr->rx_agg_ring_struct;
6fe19886
MC
2760 rmem = &ring->ring_mem;
2761 rmem->nr_pages = bp->rx_agg_nr_pages;
2762 rmem->page_size = HW_RXBD_RING_SIZE;
2763 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
2764 rmem->dma_arr = rxr->rx_agg_desc_mapping;
2765 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2766 rmem->vmem = (void **)&rxr->rx_agg_ring;
c0c050c5 2767
3b2b7d9d 2768skip_rx:
b6ab4b01 2769 txr = bnapi->tx_ring;
3b2b7d9d
MC
2770 if (!txr)
2771 continue;
2772
c0c050c5 2773 ring = &txr->tx_ring_struct;
6fe19886
MC
2774 rmem = &ring->ring_mem;
2775 rmem->nr_pages = bp->tx_nr_pages;
2776 rmem->page_size = HW_RXBD_RING_SIZE;
2777 rmem->pg_arr = (void **)txr->tx_desc_ring;
2778 rmem->dma_arr = txr->tx_desc_mapping;
2779 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2780 rmem->vmem = (void **)&txr->tx_buf_ring;
c0c050c5
MC
2781 }
2782}
2783
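/* Pre-program the static part of every RX BD in the ring: the length/type
 * word and the opaque field recording the producer index of the SW buffer.
 */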
2784static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2785{
2786 int i;
2787 u32 prod;
2788 struct rx_bd **rx_buf_ring;
2789
6fe19886
MC
2790 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
2791 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
c0c050c5
MC
2792 int j;
2793 struct rx_bd *rxbd;
2794
2795 rxbd = rx_buf_ring[i];
2796 if (!rxbd)
2797 continue;
2798
2799 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2800 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2801 rxbd->rx_bd_opaque = prod;
2802 }
2803 }
2804}
2805
2806static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2807{
2808 struct net_device *dev = bp->dev;
c0c050c5
MC
2809 struct bnxt_rx_ring_info *rxr;
2810 struct bnxt_ring_struct *ring;
2811 u32 prod, type;
2812 int i;
2813
c0c050c5
MC
2814 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2815 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2816
2817 if (NET_IP_ALIGN == 2)
2818 type |= RX_BD_FLAGS_SOP;
2819
b6ab4b01 2820 rxr = &bp->rx_ring[ring_nr];
c0c050c5
MC
2821 ring = &rxr->rx_ring_struct;
2822 bnxt_init_rxbd_pages(ring, type);
2823
c6d30e83
MC
2824 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2825 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2826 if (IS_ERR(rxr->xdp_prog)) {
2827 int rc = PTR_ERR(rxr->xdp_prog);
2828
2829 rxr->xdp_prog = NULL;
2830 return rc;
2831 }
2832 }
c0c050c5
MC
2833 prod = rxr->rx_prod;
2834 for (i = 0; i < bp->rx_ring_size; i++) {
2835 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2836 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2837 ring_nr, i, bp->rx_ring_size);
2838 break;
2839 }
2840 prod = NEXT_RX(prod);
2841 }
2842 rxr->rx_prod = prod;
2843 ring->fw_ring_id = INVALID_HW_RING_ID;
2844
edd0c2cc
MC
2845 ring = &rxr->rx_agg_ring_struct;
2846 ring->fw_ring_id = INVALID_HW_RING_ID;
2847
c0c050c5
MC
2848 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2849 return 0;
2850
2839f28b 2851 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
c0c050c5
MC
2852 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2853
2854 bnxt_init_rxbd_pages(ring, type);
2855
2856 prod = rxr->rx_agg_prod;
2857 for (i = 0; i < bp->rx_agg_ring_size; i++) {
2858 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2859 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
 2860 ring_nr, i, bp->rx_agg_ring_size);
2861 break;
2862 }
2863 prod = NEXT_RX_AGG(prod);
2864 }
2865 rxr->rx_agg_prod = prod;
c0c050c5
MC
2866
2867 if (bp->flags & BNXT_FLAG_TPA) {
2868 if (rxr->rx_tpa) {
2869 u8 *data;
2870 dma_addr_t mapping;
2871
2872 for (i = 0; i < MAX_TPA; i++) {
2873 data = __bnxt_alloc_rx_data(bp, &mapping,
2874 GFP_KERNEL);
2875 if (!data)
2876 return -ENOMEM;
2877
2878 rxr->rx_tpa[i].data = data;
b3dba77c 2879 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
c0c050c5
MC
2880 rxr->rx_tpa[i].mapping = mapping;
2881 }
2882 } else {
2883 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2884 return -ENOMEM;
2885 }
2886 }
2887
2888 return 0;
2889}
2890
2247925f
SP
2891static void bnxt_init_cp_rings(struct bnxt *bp)
2892{
3e08b184 2893 int i, j;
2247925f
SP
2894
2895 for (i = 0; i < bp->cp_nr_rings; i++) {
2896 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2897 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2898
2899 ring->fw_ring_id = INVALID_HW_RING_ID;
6a8788f2
AG
2900 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2901 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3e08b184
MC
2902 for (j = 0; j < 2; j++) {
2903 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2904
2905 if (!cpr2)
2906 continue;
2907
2908 ring = &cpr2->cp_ring_struct;
2909 ring->fw_ring_id = INVALID_HW_RING_ID;
2910 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2911 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2912 }
2247925f
SP
2913 }
2914}
2915
c0c050c5
MC
2916static int bnxt_init_rx_rings(struct bnxt *bp)
2917{
2918 int i, rc = 0;
2919
c61fb99c 2920 if (BNXT_RX_PAGE_MODE(bp)) {
c6d30e83
MC
2921 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2922 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
c61fb99c
MC
2923 } else {
2924 bp->rx_offset = BNXT_RX_OFFSET;
2925 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2926 }
b3dba77c 2927
c0c050c5
MC
2928 for (i = 0; i < bp->rx_nr_rings; i++) {
2929 rc = bnxt_init_one_rx_ring(bp, i);
2930 if (rc)
2931 break;
2932 }
2933
2934 return rc;
2935}
2936
2937static int bnxt_init_tx_rings(struct bnxt *bp)
2938{
2939 u16 i;
2940
2941 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2942 MAX_SKB_FRAGS + 1);
2943
2944 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2945 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2946 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2947
2948 ring->fw_ring_id = INVALID_HW_RING_ID;
2949 }
2950
2951 return 0;
2952}
2953
2954static void bnxt_free_ring_grps(struct bnxt *bp)
2955{
2956 kfree(bp->grp_info);
2957 bp->grp_info = NULL;
2958}
2959
2960static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2961{
2962 int i;
2963
2964 if (irq_re_init) {
2965 bp->grp_info = kcalloc(bp->cp_nr_rings,
2966 sizeof(struct bnxt_ring_grp_info),
2967 GFP_KERNEL);
2968 if (!bp->grp_info)
2969 return -ENOMEM;
2970 }
2971 for (i = 0; i < bp->cp_nr_rings; i++) {
2972 if (irq_re_init)
2973 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2974 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2975 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2976 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2977 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2978 }
2979 return 0;
2980}
2981
2982static void bnxt_free_vnics(struct bnxt *bp)
2983{
2984 kfree(bp->vnic_info);
2985 bp->vnic_info = NULL;
2986 bp->nr_vnics = 0;
2987}
2988
2989static int bnxt_alloc_vnics(struct bnxt *bp)
2990{
2991 int num_vnics = 1;
2992
2993#ifdef CONFIG_RFS_ACCEL
2994 if (bp->flags & BNXT_FLAG_RFS)
2995 num_vnics += bp->rx_nr_rings;
2996#endif
2997
dc52c6c7
PS
2998 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2999 num_vnics++;
3000
c0c050c5
MC
3001 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3002 GFP_KERNEL);
3003 if (!bp->vnic_info)
3004 return -ENOMEM;
3005
3006 bp->nr_vnics = num_vnics;
3007 return 0;
3008}
3009
3010static void bnxt_init_vnics(struct bnxt *bp)
3011{
3012 int i;
3013
3014 for (i = 0; i < bp->nr_vnics; i++) {
3015 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
44c6f72a 3016 int j;
c0c050c5
MC
3017
3018 vnic->fw_vnic_id = INVALID_HW_RING_ID;
44c6f72a
MC
3019 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3020 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3021
c0c050c5
MC
3022 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3023
3024 if (bp->vnic_info[i].rss_hash_key) {
3025 if (i == 0)
3026 prandom_bytes(vnic->rss_hash_key,
3027 HW_HASH_KEY_SIZE);
3028 else
3029 memcpy(vnic->rss_hash_key,
3030 bp->vnic_info[0].rss_hash_key,
3031 HW_HASH_KEY_SIZE);
3032 }
3033 }
3034}
3035
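/* Number of ring pages needed to hold ring_size descriptors, rounded up to a
 * power of two (minimum one page).
 */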
3036static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3037{
3038 int pages;
3039
3040 pages = ring_size / desc_per_pg;
3041
3042 if (!pages)
3043 return 1;
3044
3045 pages++;
3046
3047 while (pages & (pages - 1))
3048 pages++;
3049
3050 return pages;
3051}
3052
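/* Derive the TPA (hardware LRO/GRO) flags from the netdev features; TPA is
 * left disabled when aggregation rings are not used (e.g. XDP page mode).
 */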
c6d30e83 3053void bnxt_set_tpa_flags(struct bnxt *bp)
c0c050c5
MC
3054{
3055 bp->flags &= ~BNXT_FLAG_TPA;
341138c3
MC
3056 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3057 return;
c0c050c5
MC
3058 if (bp->dev->features & NETIF_F_LRO)
3059 bp->flags |= BNXT_FLAG_LRO;
1054aee8 3060 else if (bp->dev->features & NETIF_F_GRO_HW)
c0c050c5
MC
3061 bp->flags |= BNXT_FLAG_GRO;
3062}
3063
3064/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3065 * be set on entry.
3066 */
3067void bnxt_set_ring_params(struct bnxt *bp)
3068{
3069 u32 ring_size, rx_size, rx_space;
3070 u32 agg_factor = 0, agg_ring_size = 0;
3071
3072 /* 8 for CRC and VLAN */
3073 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3074
3075 rx_space = rx_size + NET_SKB_PAD +
3076 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3077
3078 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3079 ring_size = bp->rx_ring_size;
3080 bp->rx_agg_ring_size = 0;
3081 bp->rx_agg_nr_pages = 0;
3082
3083 if (bp->flags & BNXT_FLAG_TPA)
2839f28b 3084 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
c0c050c5
MC
3085
3086 bp->flags &= ~BNXT_FLAG_JUMBO;
bdbd1eb5 3087 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
c0c050c5
MC
3088 u32 jumbo_factor;
3089
3090 bp->flags |= BNXT_FLAG_JUMBO;
3091 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3092 if (jumbo_factor > agg_factor)
3093 agg_factor = jumbo_factor;
3094 }
3095 agg_ring_size = ring_size * agg_factor;
3096
3097 if (agg_ring_size) {
3098 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3099 RX_DESC_CNT);
3100 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3101 u32 tmp = agg_ring_size;
3102
3103 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3104 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3105 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3106 tmp, agg_ring_size);
3107 }
3108 bp->rx_agg_ring_size = agg_ring_size;
3109 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3110 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3111 rx_space = rx_size + NET_SKB_PAD +
3112 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3113 }
3114
3115 bp->rx_buf_use_size = rx_size;
3116 bp->rx_buf_size = rx_space;
3117
3118 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3119 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3120
3121 ring_size = bp->tx_ring_size;
3122 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3123 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3124
3125 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3126 bp->cp_ring_size = ring_size;
3127
3128 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3129 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3130 bp->cp_nr_pages = MAX_CP_PAGES;
3131 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3132 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3133 ring_size, bp->cp_ring_size);
3134 }
3135 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3136 bp->cp_ring_mask = bp->cp_bit - 1;
3137}
3138
96a8604f
JDB
3139/* Changing allocation mode of RX rings.
3140 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3141 */
c61fb99c 3142int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
6bb19474 3143{
c61fb99c
MC
3144 if (page_mode) {
3145 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3146 return -EOPNOTSUPP;
7eb9bb3a
MC
3147 bp->dev->max_mtu =
3148 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
c61fb99c
MC
3149 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3150 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
c61fb99c
MC
3151 bp->rx_dir = DMA_BIDIRECTIONAL;
3152 bp->rx_skb_func = bnxt_rx_page_skb;
1054aee8
MC
3153 /* Disable LRO or GRO_HW */
3154 netdev_update_features(bp->dev);
c61fb99c 3155 } else {
7eb9bb3a 3156 bp->dev->max_mtu = bp->max_mtu;
c61fb99c
MC
3157 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3158 bp->rx_dir = DMA_FROM_DEVICE;
3159 bp->rx_skb_func = bnxt_rx_skb;
3160 }
6bb19474
MC
3161 return 0;
3162}
3163
c0c050c5
MC
3164static void bnxt_free_vnic_attributes(struct bnxt *bp)
3165{
3166 int i;
3167 struct bnxt_vnic_info *vnic;
3168 struct pci_dev *pdev = bp->pdev;
3169
3170 if (!bp->vnic_info)
3171 return;
3172
3173 for (i = 0; i < bp->nr_vnics; i++) {
3174 vnic = &bp->vnic_info[i];
3175
3176 kfree(vnic->fw_grp_ids);
3177 vnic->fw_grp_ids = NULL;
3178
3179 kfree(vnic->uc_list);
3180 vnic->uc_list = NULL;
3181
3182 if (vnic->mc_list) {
3183 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3184 vnic->mc_list, vnic->mc_list_mapping);
3185 vnic->mc_list = NULL;
3186 }
3187
3188 if (vnic->rss_table) {
3189 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3190 vnic->rss_table,
3191 vnic->rss_table_dma_addr);
3192 vnic->rss_table = NULL;
3193 }
3194
3195 vnic->rss_hash_key = NULL;
3196 vnic->flags = 0;
3197 }
3198}
3199
3200static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3201{
3202 int i, rc = 0, size;
3203 struct bnxt_vnic_info *vnic;
3204 struct pci_dev *pdev = bp->pdev;
3205 int max_rings;
3206
3207 for (i = 0; i < bp->nr_vnics; i++) {
3208 vnic = &bp->vnic_info[i];
3209
3210 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3211 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3212
3213 if (mem_size > 0) {
3214 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3215 if (!vnic->uc_list) {
3216 rc = -ENOMEM;
3217 goto out;
3218 }
3219 }
3220 }
3221
3222 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3223 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3224 vnic->mc_list =
3225 dma_alloc_coherent(&pdev->dev,
3226 vnic->mc_list_size,
3227 &vnic->mc_list_mapping,
3228 GFP_KERNEL);
3229 if (!vnic->mc_list) {
3230 rc = -ENOMEM;
3231 goto out;
3232 }
3233 }
3234
44c6f72a
MC
3235 if (bp->flags & BNXT_FLAG_CHIP_P5)
3236 goto vnic_skip_grps;
3237
c0c050c5
MC
3238 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3239 max_rings = bp->rx_nr_rings;
3240 else
3241 max_rings = 1;
3242
3243 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3244 if (!vnic->fw_grp_ids) {
3245 rc = -ENOMEM;
3246 goto out;
3247 }
44c6f72a 3248vnic_skip_grps:
ae10ae74
MC
3249 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3250 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3251 continue;
3252
c0c050c5
MC
3253 /* Allocate rss table and hash key */
3254 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3255 &vnic->rss_table_dma_addr,
3256 GFP_KERNEL);
3257 if (!vnic->rss_table) {
3258 rc = -ENOMEM;
3259 goto out;
3260 }
3261
3262 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3263
3264 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3265 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3266 }
3267 return 0;
3268
3269out:
3270 return rc;
3271}
3272
3273static void bnxt_free_hwrm_resources(struct bnxt *bp)
3274{
3275 struct pci_dev *pdev = bp->pdev;
3276
a2bf74f4
VD
3277 if (bp->hwrm_cmd_resp_addr) {
3278 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3279 bp->hwrm_cmd_resp_dma_addr);
3280 bp->hwrm_cmd_resp_addr = NULL;
3281 }
c0c050c5
MC
3282}
3283
3284static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3285{
3286 struct pci_dev *pdev = bp->pdev;
3287
3288 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3289 &bp->hwrm_cmd_resp_dma_addr,
3290 GFP_KERNEL);
3291 if (!bp->hwrm_cmd_resp_addr)
3292 return -ENOMEM;
c0c050c5
MC
3293
3294 return 0;
3295}
3296
e605db80
DK
3297static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3298{
3299 if (bp->hwrm_short_cmd_req_addr) {
3300 struct pci_dev *pdev = bp->pdev;
3301
1dfddc41 3302 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
e605db80
DK
3303 bp->hwrm_short_cmd_req_addr,
3304 bp->hwrm_short_cmd_req_dma_addr);
3305 bp->hwrm_short_cmd_req_addr = NULL;
3306 }
3307}
3308
3309static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3310{
3311 struct pci_dev *pdev = bp->pdev;
3312
3313 bp->hwrm_short_cmd_req_addr =
1dfddc41 3314 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
e605db80
DK
3315 &bp->hwrm_short_cmd_req_dma_addr,
3316 GFP_KERNEL);
3317 if (!bp->hwrm_short_cmd_req_addr)
3318 return -ENOMEM;
3319
3320 return 0;
3321}
3322
fd3ab1c7 3323static void bnxt_free_port_stats(struct bnxt *bp)
c0c050c5 3324{
c0c050c5
MC
3325 struct pci_dev *pdev = bp->pdev;
3326
00db3cba
VV
3327 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3328 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3329
3bdf56c4
MC
3330 if (bp->hw_rx_port_stats) {
3331 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3332 bp->hw_rx_port_stats,
3333 bp->hw_rx_port_stats_map);
3334 bp->hw_rx_port_stats = NULL;
00db3cba
VV
3335 }
3336
36e53349
MC
3337 if (bp->hw_tx_port_stats_ext) {
3338 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3339 bp->hw_tx_port_stats_ext,
3340 bp->hw_tx_port_stats_ext_map);
3341 bp->hw_tx_port_stats_ext = NULL;
3342 }
3343
00db3cba
VV
3344 if (bp->hw_rx_port_stats_ext) {
3345 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3346 bp->hw_rx_port_stats_ext,
3347 bp->hw_rx_port_stats_ext_map);
3348 bp->hw_rx_port_stats_ext = NULL;
3bdf56c4 3349 }
fd3ab1c7
MC
3350}
3351
3352static void bnxt_free_ring_stats(struct bnxt *bp)
3353{
3354 struct pci_dev *pdev = bp->pdev;
3355 int size, i;
3bdf56c4 3356
c0c050c5
MC
3357 if (!bp->bnapi)
3358 return;
3359
3360 size = sizeof(struct ctx_hw_stats);
3361
3362 for (i = 0; i < bp->cp_nr_rings; i++) {
3363 struct bnxt_napi *bnapi = bp->bnapi[i];
3364 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3365
3366 if (cpr->hw_stats) {
3367 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3368 cpr->hw_stats_map);
3369 cpr->hw_stats = NULL;
3370 }
3371 }
3372}
3373
3374static int bnxt_alloc_stats(struct bnxt *bp)
3375{
3376 u32 size, i;
3377 struct pci_dev *pdev = bp->pdev;
3378
3379 size = sizeof(struct ctx_hw_stats);
3380
3381 for (i = 0; i < bp->cp_nr_rings; i++) {
3382 struct bnxt_napi *bnapi = bp->bnapi[i];
3383 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3384
3385 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3386 &cpr->hw_stats_map,
3387 GFP_KERNEL);
3388 if (!cpr->hw_stats)
3389 return -ENOMEM;
3390
3391 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3392 }
3bdf56c4 3393
3e8060fa 3394 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
fd3ab1c7
MC
3395 if (bp->hw_rx_port_stats)
3396 goto alloc_ext_stats;
3397
3bdf56c4
MC
3398 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3399 sizeof(struct tx_port_stats) + 1024;
3400
3401 bp->hw_rx_port_stats =
3402 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3403 &bp->hw_rx_port_stats_map,
3404 GFP_KERNEL);
3405 if (!bp->hw_rx_port_stats)
3406 return -ENOMEM;
3407
3408 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3409 512;
3410 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3411 sizeof(struct rx_port_stats) + 512;
3412 bp->flags |= BNXT_FLAG_PORT_STATS;
00db3cba 3413
fd3ab1c7 3414alloc_ext_stats:
00db3cba
VV
3415 /* Display extended statistics only if FW supports it */
3416 if (bp->hwrm_spec_code < 0x10804 ||
3417 bp->hwrm_spec_code == 0x10900)
3418 return 0;
3419
fd3ab1c7
MC
3420 if (bp->hw_rx_port_stats_ext)
3421 goto alloc_tx_ext_stats;
3422
00db3cba
VV
3423 bp->hw_rx_port_stats_ext =
3424 dma_zalloc_coherent(&pdev->dev,
3425 sizeof(struct rx_port_stats_ext),
3426 &bp->hw_rx_port_stats_ext_map,
3427 GFP_KERNEL);
3428 if (!bp->hw_rx_port_stats_ext)
3429 return 0;
3430
fd3ab1c7
MC
3431alloc_tx_ext_stats:
3432 if (bp->hw_tx_port_stats_ext)
3433 return 0;
3434
36e53349
MC
3435 if (bp->hwrm_spec_code >= 0x10902) {
3436 bp->hw_tx_port_stats_ext =
3437 dma_zalloc_coherent(&pdev->dev,
3438 sizeof(struct tx_port_stats_ext),
3439 &bp->hw_tx_port_stats_ext_map,
3440 GFP_KERNEL);
3441 }
00db3cba 3442 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3bdf56c4 3443 }
c0c050c5
MC
3444 return 0;
3445}
3446
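/* Reset the software producer/consumer indices of every ring; used when the
 * rings are re-initialized without re-allocating their memory.
 */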
3447static void bnxt_clear_ring_indices(struct bnxt *bp)
3448{
3449 int i;
3450
3451 if (!bp->bnapi)
3452 return;
3453
3454 for (i = 0; i < bp->cp_nr_rings; i++) {
3455 struct bnxt_napi *bnapi = bp->bnapi[i];
3456 struct bnxt_cp_ring_info *cpr;
3457 struct bnxt_rx_ring_info *rxr;
3458 struct bnxt_tx_ring_info *txr;
3459
3460 if (!bnapi)
3461 continue;
3462
3463 cpr = &bnapi->cp_ring;
3464 cpr->cp_raw_cons = 0;
3465
b6ab4b01 3466 txr = bnapi->tx_ring;
3b2b7d9d
MC
3467 if (txr) {
3468 txr->tx_prod = 0;
3469 txr->tx_cons = 0;
3470 }
c0c050c5 3471
b6ab4b01 3472 rxr = bnapi->rx_ring;
3b2b7d9d
MC
3473 if (rxr) {
3474 rxr->rx_prod = 0;
3475 rxr->rx_agg_prod = 0;
3476 rxr->rx_sw_agg_prod = 0;
376a5b86 3477 rxr->rx_next_cons = 0;
3b2b7d9d 3478 }
c0c050c5
MC
3479 }
3480}
3481
3482static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3483{
3484#ifdef CONFIG_RFS_ACCEL
3485 int i;
3486
 3487 /* Under rtnl_lock and with all our NAPIs disabled, it's
3488 * safe to delete the hash table.
3489 */
3490 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3491 struct hlist_head *head;
3492 struct hlist_node *tmp;
3493 struct bnxt_ntuple_filter *fltr;
3494
3495 head = &bp->ntp_fltr_hash_tbl[i];
3496 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3497 hlist_del(&fltr->hash);
3498 kfree(fltr);
3499 }
3500 }
3501 if (irq_reinit) {
3502 kfree(bp->ntp_fltr_bmap);
3503 bp->ntp_fltr_bmap = NULL;
3504 }
3505 bp->ntp_fltr_count = 0;
3506#endif
3507}
3508
3509static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3510{
3511#ifdef CONFIG_RFS_ACCEL
3512 int i, rc = 0;
3513
3514 if (!(bp->flags & BNXT_FLAG_RFS))
3515 return 0;
3516
3517 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3518 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3519
3520 bp->ntp_fltr_count = 0;
ac45bd93
DC
3521 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3522 sizeof(long),
c0c050c5
MC
3523 GFP_KERNEL);
3524
3525 if (!bp->ntp_fltr_bmap)
3526 rc = -ENOMEM;
3527
3528 return rc;
3529#else
3530 return 0;
3531#endif
3532}
3533
3534static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3535{
3536 bnxt_free_vnic_attributes(bp);
3537 bnxt_free_tx_rings(bp);
3538 bnxt_free_rx_rings(bp);
3539 bnxt_free_cp_rings(bp);
3540 bnxt_free_ntp_fltrs(bp, irq_re_init);
3541 if (irq_re_init) {
fd3ab1c7 3542 bnxt_free_ring_stats(bp);
c0c050c5
MC
3543 bnxt_free_ring_grps(bp);
3544 bnxt_free_vnics(bp);
a960dec9
MC
3545 kfree(bp->tx_ring_map);
3546 bp->tx_ring_map = NULL;
b6ab4b01
MC
3547 kfree(bp->tx_ring);
3548 bp->tx_ring = NULL;
3549 kfree(bp->rx_ring);
3550 bp->rx_ring = NULL;
c0c050c5
MC
3551 kfree(bp->bnapi);
3552 bp->bnapi = NULL;
3553 } else {
3554 bnxt_clear_ring_indices(bp);
3555 }
3556}
3557
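/* Allocate all per-ring data structures.  With irq_re_init the bnapi array,
 * ring arrays, stats, ntuple filter table and VNIC array are allocated from
 * scratch; otherwise only the ring memory and VNIC attributes are rebuilt.
 */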
3558static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3559{
01657bcd 3560 int i, j, rc, size, arr_size;
c0c050c5
MC
3561 void *bnapi;
3562
3563 if (irq_re_init) {
3564 /* Allocate bnapi mem pointer array and mem block for
3565 * all queues
3566 */
3567 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3568 bp->cp_nr_rings);
3569 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3570 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3571 if (!bnapi)
3572 return -ENOMEM;
3573
3574 bp->bnapi = bnapi;
3575 bnapi += arr_size;
3576 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3577 bp->bnapi[i] = bnapi;
3578 bp->bnapi[i]->index = i;
3579 bp->bnapi[i]->bp = bp;
e38287b7
MC
3580 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3581 struct bnxt_cp_ring_info *cpr =
3582 &bp->bnapi[i]->cp_ring;
3583
3584 cpr->cp_ring_struct.ring_mem.flags =
3585 BNXT_RMEM_RING_PTE_FLAG;
3586 }
c0c050c5
MC
3587 }
3588
b6ab4b01
MC
3589 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3590 sizeof(struct bnxt_rx_ring_info),
3591 GFP_KERNEL);
3592 if (!bp->rx_ring)
3593 return -ENOMEM;
3594
3595 for (i = 0; i < bp->rx_nr_rings; i++) {
e38287b7
MC
3596 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3597
3598 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3599 rxr->rx_ring_struct.ring_mem.flags =
3600 BNXT_RMEM_RING_PTE_FLAG;
3601 rxr->rx_agg_ring_struct.ring_mem.flags =
3602 BNXT_RMEM_RING_PTE_FLAG;
3603 }
3604 rxr->bnapi = bp->bnapi[i];
b6ab4b01
MC
3605 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3606 }
3607
3608 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3609 sizeof(struct bnxt_tx_ring_info),
3610 GFP_KERNEL);
3611 if (!bp->tx_ring)
3612 return -ENOMEM;
3613
a960dec9
MC
3614 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3615 GFP_KERNEL);
3616
3617 if (!bp->tx_ring_map)
3618 return -ENOMEM;
3619
01657bcd
MC
3620 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3621 j = 0;
3622 else
3623 j = bp->rx_nr_rings;
3624
3625 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
e38287b7
MC
3626 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3627
3628 if (bp->flags & BNXT_FLAG_CHIP_P5)
3629 txr->tx_ring_struct.ring_mem.flags =
3630 BNXT_RMEM_RING_PTE_FLAG;
3631 txr->bnapi = bp->bnapi[j];
3632 bp->bnapi[j]->tx_ring = txr;
5f449249 3633 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
38413406 3634 if (i >= bp->tx_nr_rings_xdp) {
e38287b7 3635 txr->txq_index = i - bp->tx_nr_rings_xdp;
38413406
MC
3636 bp->bnapi[j]->tx_int = bnxt_tx_int;
3637 } else {
fa3e93e8 3638 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
38413406
MC
3639 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3640 }
b6ab4b01
MC
3641 }
3642
c0c050c5
MC
3643 rc = bnxt_alloc_stats(bp);
3644 if (rc)
3645 goto alloc_mem_err;
3646
3647 rc = bnxt_alloc_ntp_fltrs(bp);
3648 if (rc)
3649 goto alloc_mem_err;
3650
3651 rc = bnxt_alloc_vnics(bp);
3652 if (rc)
3653 goto alloc_mem_err;
3654 }
3655
3656 bnxt_init_ring_struct(bp);
3657
3658 rc = bnxt_alloc_rx_rings(bp);
3659 if (rc)
3660 goto alloc_mem_err;
3661
3662 rc = bnxt_alloc_tx_rings(bp);
3663 if (rc)
3664 goto alloc_mem_err;
3665
3666 rc = bnxt_alloc_cp_rings(bp);
3667 if (rc)
3668 goto alloc_mem_err;
3669
3670 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3671 BNXT_VNIC_UCAST_FLAG;
3672 rc = bnxt_alloc_vnic_attributes(bp);
3673 if (rc)
3674 goto alloc_mem_err;
3675 return 0;
3676
3677alloc_mem_err:
3678 bnxt_free_mem(bp, true);
3679 return rc;
3680}
3681
9d8bc097
MC
3682static void bnxt_disable_int(struct bnxt *bp)
3683{
3684 int i;
3685
3686 if (!bp->bnapi)
3687 return;
3688
3689 for (i = 0; i < bp->cp_nr_rings; i++) {
3690 struct bnxt_napi *bnapi = bp->bnapi[i];
3691 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
daf1f1e7 3692 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9d8bc097 3693
daf1f1e7 3694 if (ring->fw_ring_id != INVALID_HW_RING_ID)
697197e5 3695 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
3696 }
3697}
3698
e5811b8c
MC
3699static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
3700{
3701 struct bnxt_napi *bnapi = bp->bnapi[n];
3702 struct bnxt_cp_ring_info *cpr;
3703
3704 cpr = &bnapi->cp_ring;
3705 return cpr->cp_ring_struct.map_idx;
3706}
3707
9d8bc097
MC
3708static void bnxt_disable_int_sync(struct bnxt *bp)
3709{
3710 int i;
3711
3712 atomic_inc(&bp->intr_sem);
3713
3714 bnxt_disable_int(bp);
e5811b8c
MC
3715 for (i = 0; i < bp->cp_nr_rings; i++) {
3716 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
3717
3718 synchronize_irq(bp->irq_tbl[map_idx].vector);
3719 }
9d8bc097
MC
3720}
3721
3722static void bnxt_enable_int(struct bnxt *bp)
3723{
3724 int i;
3725
3726 atomic_set(&bp->intr_sem, 0);
3727 for (i = 0; i < bp->cp_nr_rings; i++) {
3728 struct bnxt_napi *bnapi = bp->bnapi[i];
3729 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3730
697197e5 3731 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
9d8bc097
MC
3732 }
3733}
3734
c0c050c5
MC
3735void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3736 u16 cmpl_ring, u16 target_id)
3737{
a8643e16 3738 struct input *req = request;
c0c050c5 3739
a8643e16
MC
3740 req->req_type = cpu_to_le16(req_type);
3741 req->cmpl_ring = cpu_to_le16(cmpl_ring);
3742 req->target_id = cpu_to_le16(target_id);
c0c050c5
MC
3743 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3744}
3745
fbfbc485
MC
3746static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3747 int timeout, bool silent)
c0c050c5 3748{
a11fa2be 3749 int i, intr_process, rc, tmo_count;
a8643e16 3750 struct input *req = msg;
c0c050c5 3751 u32 *data = msg;
845adfe4
MC
3752 __le32 *resp_len;
3753 u8 *valid;
c0c050c5
MC
3754 u16 cp_ring_id, len = 0;
3755 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
e605db80 3756 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
ebd5818c 3757 struct hwrm_short_input short_input = {0};
c0c050c5 3758
a8643e16 3759 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
c0c050c5 3760 memset(resp, 0, PAGE_SIZE);
a8643e16 3761 cp_ring_id = le16_to_cpu(req->cmpl_ring);
c0c050c5
MC
3762 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3763
1dfddc41
MC
3764 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
3765 if (msg_len > bp->hwrm_max_ext_req_len ||
3766 !bp->hwrm_short_cmd_req_addr)
3767 return -EINVAL;
3768 }
3769
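 /* If the firmware advertises the short command capability, or the
 * request is larger than the standard inline request size, the request
 * body is copied into a pre-mapped DMA buffer and only a small
 * hwrm_short_input descriptor (signature, size, DMA address) is written
 * to the communication channel below.
 */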
3770 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
3771 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
e605db80 3772 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
1dfddc41
MC
3773 u16 max_msg_len;
3774
 3775 /* Set the boundary for the maximum extended request length for the
 3776 * short cmd format. If passed up from the device, use the max
 3777 * supported internal req length.
 3778 */
3779 max_msg_len = bp->hwrm_max_ext_req_len;
e605db80
DK
3780
3781 memcpy(short_cmd_req, req, msg_len);
1dfddc41
MC
3782 if (msg_len < max_msg_len)
3783 memset(short_cmd_req + msg_len, 0,
3784 max_msg_len - msg_len);
e605db80
DK
3785
3786 short_input.req_type = req->req_type;
3787 short_input.signature =
3788 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3789 short_input.size = cpu_to_le16(msg_len);
3790 short_input.req_addr =
3791 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3792
3793 data = (u32 *)&short_input;
3794 msg_len = sizeof(short_input);
3795
3796 /* Sync memory write before updating doorbell */
3797 wmb();
3798
3799 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3800 }
3801
c0c050c5
MC
3802 /* Write request msg to hwrm channel */
3803 __iowrite32_copy(bp->bar0, data, msg_len / 4);
3804
e605db80 3805 for (i = msg_len; i < max_req_len; i += 4)
d79979a1
MC
3806 writel(0, bp->bar0 + i);
3807
c0c050c5
MC
3808 /* currently supports only one outstanding message */
3809 if (intr_process)
a8643e16 3810 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
c0c050c5
MC
3811
3812 /* Ring channel doorbell */
3813 writel(1, bp->bar0 + 0x100);
3814
ff4fe81d
MC
3815 if (!timeout)
3816 timeout = DFLT_HWRM_CMD_TIMEOUT;
9751e8e7
AG
3817 /* convert timeout to usec */
3818 timeout *= 1000;
ff4fe81d 3819
c0c050c5 3820 i = 0;
9751e8e7
AG
3821 /* Short timeout for the first few iterations:
3822 * number of loops = number of loops for short timeout +
3823 * number of loops for standard timeout.
3824 */
3825 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
3826 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
3827 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
845adfe4 3828 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
c0c050c5
MC
3829 if (intr_process) {
3830 /* Wait until hwrm response cmpl interrupt is processed */
3831 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
a11fa2be 3832 i++ < tmo_count) {
9751e8e7
AG
3833 /* on first few passes, just barely sleep */
3834 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3835 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3836 HWRM_SHORT_MAX_TIMEOUT);
3837 else
3838 usleep_range(HWRM_MIN_TIMEOUT,
3839 HWRM_MAX_TIMEOUT);
c0c050c5
MC
3840 }
3841
3842 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
3843 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
a8643e16 3844 le16_to_cpu(req->req_type));
c0c050c5
MC
3845 return -1;
3846 }
845adfe4
MC
3847 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3848 HWRM_RESP_LEN_SFT;
3849 valid = bp->hwrm_cmd_resp_addr + len - 1;
c0c050c5 3850 } else {
cc559c1a
MC
3851 int j;
3852
c0c050c5 3853 /* Check if response len is updated */
a11fa2be 3854 for (i = 0; i < tmo_count; i++) {
c0c050c5
MC
3855 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3856 HWRM_RESP_LEN_SFT;
3857 if (len)
3858 break;
9751e8e7
AG
3859 /* on first few passes, just barely sleep */
 3860 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3861 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3862 HWRM_SHORT_MAX_TIMEOUT);
3863 else
3864 usleep_range(HWRM_MIN_TIMEOUT,
3865 HWRM_MAX_TIMEOUT);
c0c050c5
MC
3866 }
3867
a11fa2be 3868 if (i >= tmo_count) {
c0c050c5 3869 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
cc559c1a
MC
3870 HWRM_TOTAL_TIMEOUT(i),
3871 le16_to_cpu(req->req_type),
8578d6c1 3872 le16_to_cpu(req->seq_id), len);
c0c050c5
MC
3873 return -1;
3874 }
3875
845adfe4
MC
3876 /* Last byte of resp contains valid bit */
3877 valid = bp->hwrm_cmd_resp_addr + len - 1;
cc559c1a 3878 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
845adfe4
MC
3879 /* make sure we read from updated DMA memory */
3880 dma_rmb();
3881 if (*valid)
c0c050c5 3882 break;
a11fa2be 3883 udelay(1);
c0c050c5
MC
3884 }
3885
cc559c1a 3886 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
c0c050c5 3887 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
cc559c1a
MC
3888 HWRM_TOTAL_TIMEOUT(i),
3889 le16_to_cpu(req->req_type),
a8643e16 3890 le16_to_cpu(req->seq_id), len, *valid);
c0c050c5
MC
3891 return -1;
3892 }
3893 }
3894
845adfe4
MC
 3895 /* Zero the valid bit for compatibility. A valid bit in an older spec
 3896 * may become a new field in a newer spec. We must make sure that
 3897 * a new field not implemented by the old spec will read zero.
 3898 */
3899 *valid = 0;
c0c050c5 3900 rc = le16_to_cpu(resp->error_code);
fbfbc485 3901 if (rc && !silent)
c0c050c5
MC
3902 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3903 le16_to_cpu(resp->req_type),
3904 le16_to_cpu(resp->seq_id), rc);
fbfbc485
MC
3905 return rc;
3906}
3907
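/* The _hwrm_send_message() variants assume the caller already holds
 * bp->hwrm_cmd_lock, which also protects the shared response buffer at
 * bp->hwrm_cmd_resp_addr.  hwrm_send_message() and
 * hwrm_send_message_silent() take the lock internally and suit callers
 * that do not need to read the response.  A caller that does read the
 * response typically looks roughly like this (HWRM_XXX and the struct
 * names are placeholders):
 *
 *	struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *	struct hwrm_xxx_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_XXX, -1, -1);
 *	mutex_lock(&bp->hwrm_cmd_lock);
 *	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *	if (!rc)
 *		... copy fields out of *resp ...
 *	mutex_unlock(&bp->hwrm_cmd_lock);
 */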
3908int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3909{
3910 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
c0c050c5
MC
3911}
3912
cc72f3b1
MC
3913int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3914 int timeout)
3915{
3916 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3917}
3918
c0c050c5
MC
3919int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3920{
3921 int rc;
3922
3923 mutex_lock(&bp->hwrm_cmd_lock);
3924 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3925 mutex_unlock(&bp->hwrm_cmd_lock);
3926 return rc;
3927}
3928
90e20921
MC
3929int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3930 int timeout)
3931{
3932 int rc;
3933
3934 mutex_lock(&bp->hwrm_cmd_lock);
3935 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3936 mutex_unlock(&bp->hwrm_cmd_lock);
3937 return rc;
3938}
3939
a1653b13
MC
3940int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3941 int bmap_size)
c0c050c5
MC
3942{
3943 struct hwrm_func_drv_rgtr_input req = {0};
25be8623
MC
3944 DECLARE_BITMAP(async_events_bmap, 256);
3945 u32 *events = (u32 *)async_events_bmap;
a1653b13 3946 int i;
c0c050c5
MC
3947
3948 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3949
3950 req.enables =
a1653b13 3951 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
c0c050c5 3952
25be8623
MC
3953 memset(async_events_bmap, 0, sizeof(async_events_bmap));
3954 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
3955 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
3956
a1653b13
MC
3957 if (bmap && bmap_size) {
3958 for (i = 0; i < bmap_size; i++) {
3959 if (test_bit(i, bmap))
3960 __set_bit(i, async_events_bmap);
3961 }
3962 }
3963
25be8623
MC
3964 for (i = 0; i < 8; i++)
3965 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
3966
a1653b13
MC
3967 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3968}
3969
3970static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
3971{
25e1acd6 3972 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
a1653b13 3973 struct hwrm_func_drv_rgtr_input req = {0};
25e1acd6 3974 int rc;
a1653b13
MC
3975
3976 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3977
3978 req.enables =
3979 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
3980 FUNC_DRV_RGTR_REQ_ENABLES_VER);
3981
11f15ed3 3982 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
d4f52de0
MC
3983 req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
3984 req.ver_maj_8b = DRV_VER_MAJ;
3985 req.ver_min_8b = DRV_VER_MIN;
3986 req.ver_upd_8b = DRV_VER_UPD;
3987 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
3988 req.ver_min = cpu_to_le16(DRV_VER_MIN);
3989 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
c0c050c5
MC
3990
3991 if (BNXT_PF(bp)) {
9b0436c3 3992 u32 data[8];
a1653b13 3993 int i;
c0c050c5 3994
9b0436c3
MC
3995 memset(data, 0, sizeof(data));
3996 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
3997 u16 cmd = bnxt_vf_req_snif[i];
3998 unsigned int bit, idx;
3999
4000 idx = cmd / 32;
4001 bit = cmd % 32;
4002 data[idx] |= 1 << bit;
4003 }
c0c050c5 4004
de68f5de
MC
4005 for (i = 0; i < 8; i++)
4006 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4007
c0c050c5
MC
4008 req.enables |=
4009 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4010 }
4011
25e1acd6
MC
4012 mutex_lock(&bp->hwrm_cmd_lock);
4013 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4014 if (rc)
4015 rc = -EIO;
4016 else if (resp->flags &
4017 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4018 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4019 mutex_unlock(&bp->hwrm_cmd_lock);
4020 return rc;
c0c050c5
MC
4021}
4022
be58a0da
JH
4023static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4024{
4025 struct hwrm_func_drv_unrgtr_input req = {0};
4026
4027 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4028 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4029}
4030
c0c050c5
MC
4031static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4032{
4033 u32 rc = 0;
4034 struct hwrm_tunnel_dst_port_free_input req = {0};
4035
4036 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4037 req.tunnel_type = tunnel_type;
4038
4039 switch (tunnel_type) {
4040 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4041 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4042 break;
4043 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4044 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4045 break;
4046 default:
4047 break;
4048 }
4049
4050 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4051 if (rc)
4052 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4053 rc);
4054 return rc;
4055}
4056
4057static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4058 u8 tunnel_type)
4059{
4060 u32 rc = 0;
4061 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4062 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4063
4064 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4065
4066 req.tunnel_type = tunnel_type;
4067 req.tunnel_dst_port_val = port;
4068
4069 mutex_lock(&bp->hwrm_cmd_lock);
4070 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4071 if (rc) {
4072 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4073 rc);
4074 goto err_out;
4075 }
4076
57aac71b
CJ
4077 switch (tunnel_type) {
4078 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
c0c050c5 4079 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
57aac71b
CJ
4080 break;
4081 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
c0c050c5 4082 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
57aac71b
CJ
4083 break;
4084 default:
4085 break;
4086 }
4087
c0c050c5
MC
4088err_out:
4089 mutex_unlock(&bp->hwrm_cmd_lock);
4090 return rc;
4091}
4092
4093static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4094{
4095 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4096 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4097
4098 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
c193554e 4099 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
c0c050c5
MC
4100
4101 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4102 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4103 req.mask = cpu_to_le32(vnic->rx_mask);
4104 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4105}
4106
4107#ifdef CONFIG_RFS_ACCEL
4108static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4109 struct bnxt_ntuple_filter *fltr)
4110{
4111 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4112
4113 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4114 req.ntuple_filter_id = fltr->filter_id;
4115 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4116}
4117
4118#define BNXT_NTP_FLTR_FLAGS \
4119 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4120 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4121 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4122 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4123 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4124 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4125 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4126 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4127 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4128 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4129 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4130 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4131 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
c193554e 4132 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
c0c050c5 4133
61aad724
MC
4134#define BNXT_NTP_TUNNEL_FLTR_FLAG \
4135 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4136
c0c050c5
MC
4137static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4138 struct bnxt_ntuple_filter *fltr)
4139{
4140 int rc = 0;
4141 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4142 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4143 bp->hwrm_cmd_resp_addr;
4144 struct flow_keys *keys = &fltr->fkeys;
4145 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
4146
4147 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
a54c4d74 4148 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
c0c050c5
MC
4149
4150 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4151
4152 req.ethertype = htons(ETH_P_IP);
4153 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
c193554e 4154 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
c0c050c5
MC
4155 req.ip_protocol = keys->basic.ip_proto;
4156
dda0e746
MC
4157 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4158 int i;
4159
4160 req.ethertype = htons(ETH_P_IPV6);
4161 req.ip_addr_type =
4162 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4163 *(struct in6_addr *)&req.src_ipaddr[0] =
4164 keys->addrs.v6addrs.src;
4165 *(struct in6_addr *)&req.dst_ipaddr[0] =
4166 keys->addrs.v6addrs.dst;
4167 for (i = 0; i < 4; i++) {
4168 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4169 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4170 }
4171 } else {
4172 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4173 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4174 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4175 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4176 }
61aad724
MC
4177 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4178 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4179 req.tunnel_type =
4180 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4181 }
c0c050c5
MC
4182
4183 req.src_port = keys->ports.src;
4184 req.src_port_mask = cpu_to_be16(0xffff);
4185 req.dst_port = keys->ports.dst;
4186 req.dst_port_mask = cpu_to_be16(0xffff);
4187
c193554e 4188 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
c0c050c5
MC
4189 mutex_lock(&bp->hwrm_cmd_lock);
4190 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4191 if (!rc)
4192 fltr->filter_id = resp->ntuple_filter_id;
4193 mutex_unlock(&bp->hwrm_cmd_lock);
4194 return rc;
4195}
4196#endif
4197
4198static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4199 u8 *mac_addr)
4200{
4201 u32 rc = 0;
4202 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4203 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4204
4205 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
dc52c6c7
PS
4206 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4207 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4208 req.flags |=
4209 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
c193554e 4210 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
c0c050c5
MC
4211 req.enables =
4212 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
c193554e 4213 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
c0c050c5
MC
4214 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4215 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4216 req.l2_addr_mask[0] = 0xff;
4217 req.l2_addr_mask[1] = 0xff;
4218 req.l2_addr_mask[2] = 0xff;
4219 req.l2_addr_mask[3] = 0xff;
4220 req.l2_addr_mask[4] = 0xff;
4221 req.l2_addr_mask[5] = 0xff;
4222
4223 mutex_lock(&bp->hwrm_cmd_lock);
4224 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4225 if (!rc)
4226 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4227 resp->l2_filter_id;
4228 mutex_unlock(&bp->hwrm_cmd_lock);
4229 return rc;
4230}
4231
4232static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4233{
4234 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4235 int rc = 0;
4236
4237 /* Any associated ntuple filters will also be cleared by firmware. */
4238 mutex_lock(&bp->hwrm_cmd_lock);
4239 for (i = 0; i < num_of_vnics; i++) {
4240 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4241
4242 for (j = 0; j < vnic->uc_filter_count; j++) {
4243 struct hwrm_cfa_l2_filter_free_input req = {0};
4244
4245 bnxt_hwrm_cmd_hdr_init(bp, &req,
4246 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4247
4248 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4249
4250 rc = _hwrm_send_message(bp, &req, sizeof(req),
4251 HWRM_CMD_TIMEOUT);
4252 }
4253 vnic->uc_filter_count = 0;
4254 }
4255 mutex_unlock(&bp->hwrm_cmd_lock);
4256
4257 return rc;
4258}
4259
4260static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4261{
4262 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4263 struct hwrm_vnic_tpa_cfg_input req = {0};
4264
3c4fe80b
MC
4265 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4266 return 0;
4267
c0c050c5
MC
4268 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4269
4270 if (tpa_flags) {
4271 u16 mss = bp->dev->mtu - 40;
4272 u32 nsegs, n, segs = 0, flags;
4273
4274 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4275 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4276 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4277 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4278 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4279 if (tpa_flags & BNXT_FLAG_GRO)
4280 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4281
4282 req.flags = cpu_to_le32(flags);
4283
4284 req.enables =
4285 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
c193554e
MC
4286 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4287 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
c0c050c5
MC
4288
 4289 /* The number of segs is in log2 units, and the first packet is
 4290 * not included as part of these units.
 4291 */
2839f28b
MC
4292 if (mss <= BNXT_RX_PAGE_SIZE) {
4293 n = BNXT_RX_PAGE_SIZE / mss;
c0c050c5
MC
4294 nsegs = (MAX_SKB_FRAGS - 1) * n;
4295 } else {
2839f28b
MC
4296 n = mss / BNXT_RX_PAGE_SIZE;
4297 if (mss & (BNXT_RX_PAGE_SIZE - 1))
c0c050c5
MC
4298 n++;
4299 nsegs = (MAX_SKB_FRAGS - n) / n;
4300 }
4301
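 /* Worked example (assuming a 4K BNXT_RX_PAGE_SIZE and MAX_SKB_FRAGS
 * of 17): with a 1500-byte MTU, mss = 1460, n = 4096 / 1460 = 2,
 * nsegs = (17 - 1) * 2 = 32, so segs = ilog2(32) = 5 is sent to
 * the firmware.
 */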
4302 segs = ilog2(nsegs);
4303 req.max_agg_segs = cpu_to_le16(segs);
4304 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
c193554e
MC
4305
4306 req.min_agg_len = cpu_to_le32(512);
c0c050c5
MC
4307 }
4308 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4309
4310 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4311}
4312
2c61d211
MC
4313static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4314{
4315 struct bnxt_ring_grp_info *grp_info;
4316
4317 grp_info = &bp->grp_info[ring->grp_idx];
4318 return grp_info->cp_fw_ring_id;
4319}
4320
4321static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4322{
4323 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4324 struct bnxt_napi *bnapi = rxr->bnapi;
4325 struct bnxt_cp_ring_info *cpr;
4326
4327 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4328 return cpr->cp_ring_struct.fw_ring_id;
4329 } else {
4330 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4331 }
4332}
4333
4334static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4335{
4336 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4337 struct bnxt_napi *bnapi = txr->bnapi;
4338 struct bnxt_cp_ring_info *cpr;
4339
4340 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4341 return cpr->cp_ring_struct.fw_ring_id;
4342 } else {
4343 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4344 }
4345}
4346
c0c050c5
MC
4347static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4348{
4349 u32 i, j, max_rings;
4350 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4351 struct hwrm_vnic_rss_cfg_input req = {0};
4352
7b3af4f7
MC
4353 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4354 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
c0c050c5
MC
4355 return 0;
4356
4357 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4358 if (set_rss) {
87da7f79 4359 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
50f011b6 4360 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
dc52c6c7
PS
4361 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4362 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4363 max_rings = bp->rx_nr_rings - 1;
4364 else
4365 max_rings = bp->rx_nr_rings;
4366 } else {
c0c050c5 4367 max_rings = 1;
dc52c6c7 4368 }
c0c050c5
MC
4369
4370 /* Fill the RSS indirection table with ring group ids */
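 /* e.g. with 4 RX rings, entries 0..3 get fw_grp_ids[0..3] and the
 * pattern simply repeats for the rest of the HW_HASH_INDEX_SIZE
 * entries.
 */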
4371 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4372 if (j == max_rings)
4373 j = 0;
4374 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4375 }
4376
4377 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4378 req.hash_key_tbl_addr =
4379 cpu_to_le64(vnic->rss_hash_key_dma_addr);
4380 }
94ce9caa 4381 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
c0c050c5
MC
4382 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4383}
4384
7b3af4f7
MC
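/* P5 chips use ring-table-pair RSS contexts instead of ring groups: each
 * 64-entry chunk of the indirection table is programmed with (RX ring id,
 * companion completion ring id) pairs under its own rss_ctx_idx, one
 * VNIC_RSS_CFG request per chunk.
 */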
4385static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4386{
4387 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4388 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4389 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4390 struct hwrm_vnic_rss_cfg_input req = {0};
4391
4392 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4393 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4394 if (!set_rss) {
4395 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4396 return 0;
4397 }
4398 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4399 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4400 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4401 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4402 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4403 for (i = 0, k = 0; i < nr_ctxs; i++) {
4404 __le16 *ring_tbl = vnic->rss_table;
4405 int rc;
4406
4407 req.ring_table_pair_index = i;
4408 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4409 for (j = 0; j < 64; j++) {
4410 u16 ring_id;
4411
4412 ring_id = rxr->rx_ring_struct.fw_ring_id;
4413 *ring_tbl++ = cpu_to_le16(ring_id);
4414 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4415 *ring_tbl++ = cpu_to_le16(ring_id);
4416 rxr++;
4417 k++;
4418 if (k == max_rings) {
4419 k = 0;
4420 rxr = &bp->rx_ring[0];
4421 }
4422 }
4423 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4424 if (rc)
4425 return -EIO;
4426 }
4427 return 0;
4428}
4429
c0c050c5
MC
4430static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4431{
4432 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4433 struct hwrm_vnic_plcmodes_cfg_input req = {0};
4434
4435 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4436 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4437 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4438 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4439 req.enables =
4440 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4441 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4442 /* thresholds not implemented in firmware yet */
4443 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4444 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4445 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4446 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4447}
4448
94ce9caa
PS
4449static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4450 u16 ctx_idx)
c0c050c5
MC
4451{
4452 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4453
4454 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4455 req.rss_cos_lb_ctx_id =
94ce9caa 4456 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
c0c050c5
MC
4457
4458 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
94ce9caa 4459 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
c0c050c5
MC
4460}
4461
4462static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4463{
94ce9caa 4464 int i, j;
c0c050c5
MC
4465
4466 for (i = 0; i < bp->nr_vnics; i++) {
4467 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4468
94ce9caa
PS
4469 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4470 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4471 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4472 }
c0c050c5
MC
4473 }
4474 bp->rsscos_nr_ctxs = 0;
4475}
4476
94ce9caa 4477static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
c0c050c5
MC
4478{
4479 int rc;
4480 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4481 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4482 bp->hwrm_cmd_resp_addr;
4483
4484 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4485 -1);
4486
4487 mutex_lock(&bp->hwrm_cmd_lock);
4488 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4489 if (!rc)
94ce9caa 4490 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
c0c050c5
MC
4491 le16_to_cpu(resp->rss_cos_lb_ctx_id);
4492 mutex_unlock(&bp->hwrm_cmd_lock);
4493
4494 return rc;
4495}
4496
abe93ad2
MC
4497static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4498{
4499 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4500 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4501 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4502}
4503
a588e458 4504int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
c0c050c5 4505{
b81a90d3 4506 unsigned int ring = 0, grp_idx;
c0c050c5
MC
4507 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4508 struct hwrm_vnic_cfg_input req = {0};
cf6645f8 4509 u16 def_vlan = 0;
c0c050c5
MC
4510
4511 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
dc52c6c7 4512
7b3af4f7
MC
4513 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4514 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4515
4516 req.default_rx_ring_id =
4517 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
4518 req.default_cmpl_ring_id =
4519 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
4520 req.enables =
4521 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
4522 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
4523 goto vnic_mru;
4524 }
dc52c6c7 4525 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
c0c050c5 4526 /* Only RSS support for now TBD: COS & LB */
dc52c6c7
PS
4527 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
4528 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4529 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4530 VNIC_CFG_REQ_ENABLES_MRU);
ae10ae74
MC
4531 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
4532 req.rss_rule =
4533 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
4534 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4535 VNIC_CFG_REQ_ENABLES_MRU);
4536 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
dc52c6c7
PS
4537 } else {
4538 req.rss_rule = cpu_to_le16(0xffff);
4539 }
94ce9caa 4540
dc52c6c7
PS
4541 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
4542 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
94ce9caa
PS
4543 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
4544 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
4545 } else {
4546 req.cos_rule = cpu_to_le16(0xffff);
4547 }
4548
c0c050c5 4549 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
b81a90d3 4550 ring = 0;
c0c050c5 4551 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
b81a90d3 4552 ring = vnic_id - 1;
76595193
PS
4553 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
4554 ring = bp->rx_nr_rings - 1;
c0c050c5 4555
b81a90d3 4556 grp_idx = bp->rx_ring[ring].bnapi->index;
c0c050c5 4557 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
c0c050c5 4558 req.lb_rule = cpu_to_le16(0xffff);
7b3af4f7 4559vnic_mru:
c0c050c5
MC
4560 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
4561 VLAN_HLEN);
4562
7b3af4f7 4563 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
cf6645f8
MC
4564#ifdef CONFIG_BNXT_SRIOV
4565 if (BNXT_VF(bp))
4566 def_vlan = bp->vf.vlan;
4567#endif
4568 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
c0c050c5 4569 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
a588e458 4570 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
abe93ad2 4571 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
c0c050c5
MC
4572
4573 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4574}
4575
4576static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
4577{
4578 u32 rc = 0;
4579
4580 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
4581 struct hwrm_vnic_free_input req = {0};
4582
4583 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
4584 req.vnic_id =
4585 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
4586
4587 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4588 if (rc)
4589 return rc;
4590 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
4591 }
4592 return rc;
4593}
4594
4595static void bnxt_hwrm_vnic_free(struct bnxt *bp)
4596{
4597 u16 i;
4598
4599 for (i = 0; i < bp->nr_vnics; i++)
4600 bnxt_hwrm_vnic_free_one(bp, i);
4601}
4602
b81a90d3
MC
4603static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
4604 unsigned int start_rx_ring_idx,
4605 unsigned int nr_rings)
c0c050c5 4606{
b81a90d3
MC
4607 int rc = 0;
4608 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
c0c050c5
MC
4609 struct hwrm_vnic_alloc_input req = {0};
4610 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
44c6f72a
MC
4611 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4612
4613 if (bp->flags & BNXT_FLAG_CHIP_P5)
4614 goto vnic_no_ring_grps;
c0c050c5
MC
4615
4616 /* map ring groups to this vnic */
b81a90d3
MC
4617 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
4618 grp_idx = bp->rx_ring[i].bnapi->index;
4619 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
c0c050c5 4620 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
b81a90d3 4621 j, nr_rings);
c0c050c5
MC
4622 break;
4623 }
44c6f72a 4624 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
c0c050c5
MC
4625 }
4626
44c6f72a
MC
4627vnic_no_ring_grps:
4628 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
4629 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
c0c050c5
MC
4630 if (vnic_id == 0)
4631 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
4632
4633 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
4634
4635 mutex_lock(&bp->hwrm_cmd_lock);
4636 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4637 if (!rc)
44c6f72a 4638 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
c0c050c5
MC
4639 mutex_unlock(&bp->hwrm_cmd_lock);
4640 return rc;
4641}
4642
8fdefd63
MC
4643static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
4644{
4645 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4646 struct hwrm_vnic_qcaps_input req = {0};
4647 int rc;
4648
4649 if (bp->hwrm_spec_code < 0x10600)
4650 return 0;
4651
4652 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4653 mutex_lock(&bp->hwrm_cmd_lock);
4654 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4655 if (!rc) {
abe93ad2
MC
4656 u32 flags = le32_to_cpu(resp->flags);
4657
41e8d798
MC
4658 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
4659 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
8fdefd63 4660 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
abe93ad2
MC
4661 if (flags &
4662 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
4663 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
8fdefd63
MC
4664 }
4665 mutex_unlock(&bp->hwrm_cmd_lock);
4666 return rc;
4667}
4668
c0c050c5
MC
4669static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
4670{
4671 u16 i;
4672 u32 rc = 0;
4673
44c6f72a
MC
4674 if (bp->flags & BNXT_FLAG_CHIP_P5)
4675 return 0;
4676
c0c050c5
MC
4677 mutex_lock(&bp->hwrm_cmd_lock);
4678 for (i = 0; i < bp->rx_nr_rings; i++) {
4679 struct hwrm_ring_grp_alloc_input req = {0};
4680 struct hwrm_ring_grp_alloc_output *resp =
4681 bp->hwrm_cmd_resp_addr;
b81a90d3 4682 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
c0c050c5
MC
4683
4684 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
4685
b81a90d3
MC
4686 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
4687 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
4688 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
4689 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
c0c050c5
MC
4690
4691 rc = _hwrm_send_message(bp, &req, sizeof(req),
4692 HWRM_CMD_TIMEOUT);
4693 if (rc)
4694 break;
4695
b81a90d3
MC
4696 bp->grp_info[grp_idx].fw_grp_id =
4697 le32_to_cpu(resp->ring_group_id);
c0c050c5
MC
4698 }
4699 mutex_unlock(&bp->hwrm_cmd_lock);
4700 return rc;
4701}
4702
4703static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
4704{
4705 u16 i;
4706 u32 rc = 0;
4707 struct hwrm_ring_grp_free_input req = {0};
4708
44c6f72a 4709 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
c0c050c5
MC
4710 return 0;
4711
4712 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
4713
4714 mutex_lock(&bp->hwrm_cmd_lock);
4715 for (i = 0; i < bp->cp_nr_rings; i++) {
4716 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
4717 continue;
4718 req.ring_group_id =
4719 cpu_to_le32(bp->grp_info[i].fw_grp_id);
4720
4721 rc = _hwrm_send_message(bp, &req, sizeof(req),
4722 HWRM_CMD_TIMEOUT);
4723 if (rc)
4724 break;
4725 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4726 }
4727 mutex_unlock(&bp->hwrm_cmd_lock);
4728 return rc;
4729}
4730
4731static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4732 struct bnxt_ring_struct *ring,
9899bb59 4733 u32 ring_type, u32 map_index)
c0c050c5
MC
4734{
4735 int rc = 0, err = 0;
4736 struct hwrm_ring_alloc_input req = {0};
4737 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6fe19886 4738 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
9899bb59 4739 struct bnxt_ring_grp_info *grp_info;
c0c050c5
MC
4740 u16 ring_id;
4741
4742 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
4743
4744 req.enables = 0;
6fe19886
MC
4745 if (rmem->nr_pages > 1) {
4746 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
c0c050c5
MC
4747 /* Page size is in log2 units */
4748 req.page_size = BNXT_PAGE_SHIFT;
4749 req.page_tbl_depth = 1;
4750 } else {
6fe19886 4751 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
c0c050c5
MC
4752 }
4753 req.fbo = 0;
4754 /* Association of ring index with doorbell index and MSIX number */
4755 req.logical_id = cpu_to_le16(map_index);
4756
4757 switch (ring_type) {
2c61d211
MC
4758 case HWRM_RING_ALLOC_TX: {
4759 struct bnxt_tx_ring_info *txr;
4760
4761 txr = container_of(ring, struct bnxt_tx_ring_info,
4762 tx_ring_struct);
c0c050c5
MC
4763 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
4764 /* Association of transmit ring with completion ring */
9899bb59 4765 grp_info = &bp->grp_info[ring->grp_idx];
2c61d211 4766 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
c0c050c5 4767 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
9899bb59 4768 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
c0c050c5
MC
4769 req.queue_id = cpu_to_le16(ring->queue_id);
4770 break;
2c61d211 4771 }
c0c050c5
MC
4772 case HWRM_RING_ALLOC_RX:
4773 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4774 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
23aefdd7
MC
4775 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4776 u16 flags = 0;
4777
4778 /* Association of rx ring with stats context */
4779 grp_info = &bp->grp_info[ring->grp_idx];
4780 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
4781 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4782 req.enables |= cpu_to_le32(
4783 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
4784 if (NET_IP_ALIGN == 2)
4785 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
4786 req.flags = cpu_to_le16(flags);
4787 }
c0c050c5
MC
4788 break;
4789 case HWRM_RING_ALLOC_AGG:
23aefdd7
MC
4790 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4791 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
4792 /* Association of agg ring with rx ring */
4793 grp_info = &bp->grp_info[ring->grp_idx];
4794 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
4795 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
4796 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4797 req.enables |= cpu_to_le32(
4798 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
4799 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
4800 } else {
4801 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4802 }
c0c050c5
MC
4803 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
4804 break;
4805 case HWRM_RING_ALLOC_CMPL:
bac9a7e0 4806 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
c0c050c5 4807 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
23aefdd7
MC
4808 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4809 /* Association of cp ring with nq */
4810 grp_info = &bp->grp_info[map_index];
4811 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
4812 req.cq_handle = cpu_to_le64(ring->handle);
4813 req.enables |= cpu_to_le32(
4814 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
4815 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
4816 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4817 }
4818 break;
4819 case HWRM_RING_ALLOC_NQ:
4820 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
4821 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
c0c050c5
MC
4822 if (bp->flags & BNXT_FLAG_USING_MSIX)
4823 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4824 break;
4825 default:
4826 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
4827 ring_type);
4828 return -1;
4829 }
4830
4831 mutex_lock(&bp->hwrm_cmd_lock);
4832 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4833 err = le16_to_cpu(resp->error_code);
4834 ring_id = le16_to_cpu(resp->ring_id);
4835 mutex_unlock(&bp->hwrm_cmd_lock);
4836
4837 if (rc || err) {
2727c888
MC
4838 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
4839 ring_type, rc, err);
4840 return -EIO;
c0c050c5
MC
4841 }
4842 ring->fw_ring_id = ring_id;
4843 return rc;
4844}
4845
486b5c22
MC
4846static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
4847{
4848 int rc;
4849
4850 if (BNXT_PF(bp)) {
4851 struct hwrm_func_cfg_input req = {0};
4852
4853 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4854 req.fid = cpu_to_le16(0xffff);
4855 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4856 req.async_event_cr = cpu_to_le16(idx);
4857 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4858 } else {
4859 struct hwrm_func_vf_cfg_input req = {0};
4860
4861 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
4862 req.enables =
4863 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4864 req.async_event_cr = cpu_to_le16(idx);
4865 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4866 }
4867 return rc;
4868}
4869
697197e5
MC
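/* Record the doorbell address and key for a newly allocated ring.  On P5
 * chips all rings share a fixed 64-bit doorbell region (the offset differs
 * for PF and VF) and the ring id is encoded in the doorbell key; on older
 * chips each MSI-X vector has its own 32-bit doorbell at map_idx * 0x80.
 */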
4870static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
4871 u32 map_idx, u32 xid)
4872{
4873 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4874 if (BNXT_PF(bp))
4875 db->doorbell = bp->bar1 + 0x10000;
4876 else
4877 db->doorbell = bp->bar1 + 0x4000;
4878 switch (ring_type) {
4879 case HWRM_RING_ALLOC_TX:
4880 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
4881 break;
4882 case HWRM_RING_ALLOC_RX:
4883 case HWRM_RING_ALLOC_AGG:
4884 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
4885 break;
4886 case HWRM_RING_ALLOC_CMPL:
4887 db->db_key64 = DBR_PATH_L2;
4888 break;
4889 case HWRM_RING_ALLOC_NQ:
4890 db->db_key64 = DBR_PATH_L2;
4891 break;
4892 }
4893 db->db_key64 |= (u64)xid << DBR_XID_SFT;
4894 } else {
4895 db->doorbell = bp->bar1 + map_idx * 0x80;
4896 switch (ring_type) {
4897 case HWRM_RING_ALLOC_TX:
4898 db->db_key32 = DB_KEY_TX;
4899 break;
4900 case HWRM_RING_ALLOC_RX:
4901 case HWRM_RING_ALLOC_AGG:
4902 db->db_key32 = DB_KEY_RX;
4903 break;
4904 case HWRM_RING_ALLOC_CMPL:
4905 db->db_key32 = DB_KEY_CP;
4906 break;
4907 }
4908 }
4909}
4910
c0c050c5
MC
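/* Firmware ring allocation order: the NQ (P5) or completion ring for each
 * NAPI comes first so that ring 0 can be designated as the async event
 * completion ring, then TX, RX and AGG rings.  On P5 chips each TX and RX
 * ring additionally gets its own child completion ring, bound to its NQ
 * via nq_ring_id and identified by cq_handle.
 */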
4911static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4912{
4913 int i, rc = 0;
697197e5 4914 u32 type;
c0c050c5 4915
23aefdd7
MC
4916 if (bp->flags & BNXT_FLAG_CHIP_P5)
4917 type = HWRM_RING_ALLOC_NQ;
4918 else
4919 type = HWRM_RING_ALLOC_CMPL;
edd0c2cc
MC
4920 for (i = 0; i < bp->cp_nr_rings; i++) {
4921 struct bnxt_napi *bnapi = bp->bnapi[i];
4922 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4923 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9899bb59 4924 u32 map_idx = ring->map_idx;
c0c050c5 4925
697197e5 4926 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
4927 if (rc)
4928 goto err_out;
697197e5
MC
4929 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
4930 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
edd0c2cc 4931 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
486b5c22
MC
4932
4933 if (!i) {
4934 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
4935 if (rc)
4936 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
4937 }
c0c050c5
MC
4938 }
4939
697197e5 4940 type = HWRM_RING_ALLOC_TX;
edd0c2cc 4941 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 4942 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3e08b184
MC
4943 struct bnxt_ring_struct *ring;
4944 u32 map_idx;
c0c050c5 4945
3e08b184
MC
4946 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4947 struct bnxt_napi *bnapi = txr->bnapi;
4948 struct bnxt_cp_ring_info *cpr, *cpr2;
4949 u32 type2 = HWRM_RING_ALLOC_CMPL;
4950
4951 cpr = &bnapi->cp_ring;
4952 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
4953 ring = &cpr2->cp_ring_struct;
4954 ring->handle = BNXT_TX_HDL;
4955 map_idx = bnapi->index;
4956 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
4957 if (rc)
4958 goto err_out;
4959 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
4960 ring->fw_ring_id);
4961 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
4962 }
4963 ring = &txr->tx_ring_struct;
4964 map_idx = i;
697197e5 4965 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
4966 if (rc)
4967 goto err_out;
697197e5 4968 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
c0c050c5
MC
4969 }
4970
697197e5 4971 type = HWRM_RING_ALLOC_RX;
edd0c2cc 4972 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 4973 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 4974 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3e08b184
MC
4975 struct bnxt_napi *bnapi = rxr->bnapi;
4976 u32 map_idx = bnapi->index;
c0c050c5 4977
697197e5 4978 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
edd0c2cc
MC
4979 if (rc)
4980 goto err_out;
697197e5
MC
4981 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
4982 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
b81a90d3 4983 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
3e08b184
MC
4984 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4985 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4986 u32 type2 = HWRM_RING_ALLOC_CMPL;
4987 struct bnxt_cp_ring_info *cpr2;
4988
4989 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
4990 ring = &cpr2->cp_ring_struct;
4991 ring->handle = BNXT_RX_HDL;
4992 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
4993 if (rc)
4994 goto err_out;
4995 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
4996 ring->fw_ring_id);
4997 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
4998 }
c0c050c5
MC
4999 }
5000
5001 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
697197e5 5002 type = HWRM_RING_ALLOC_AGG;
c0c050c5 5003 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5004 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
5005 struct bnxt_ring_struct *ring =
5006 &rxr->rx_agg_ring_struct;
9899bb59 5007 u32 grp_idx = ring->grp_idx;
b81a90d3 5008 u32 map_idx = grp_idx + bp->rx_nr_rings;
c0c050c5 5009
697197e5 5010 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
c0c050c5
MC
5011 if (rc)
5012 goto err_out;
5013
697197e5
MC
5014 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5015 ring->fw_ring_id);
5016 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
b81a90d3 5017 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
5018 }
5019 }
5020err_out:
5021 return rc;
5022}
5023
5024static int hwrm_ring_free_send_msg(struct bnxt *bp,
5025 struct bnxt_ring_struct *ring,
5026 u32 ring_type, int cmpl_ring_id)
5027{
5028 int rc;
5029 struct hwrm_ring_free_input req = {0};
5030 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5031 u16 error_code;
5032
74608fc9 5033 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
c0c050c5
MC
5034 req.ring_type = ring_type;
5035 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5036
5037 mutex_lock(&bp->hwrm_cmd_lock);
5038 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5039 error_code = le16_to_cpu(resp->error_code);
5040 mutex_unlock(&bp->hwrm_cmd_lock);
5041
5042 if (rc || error_code) {
2727c888
MC
5043 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5044 ring_type, rc, error_code);
5045 return -EIO;
c0c050c5
MC
5046 }
5047 return 0;
5048}
5049
edd0c2cc 5050static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 5051{
23aefdd7 5052 u32 type;
edd0c2cc 5053 int i;
c0c050c5
MC
5054
5055 if (!bp->bnapi)
edd0c2cc 5056 return;
c0c050c5 5057
edd0c2cc 5058 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5059 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 5060 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2c61d211 5061 u32 cmpl_ring_id;
edd0c2cc 5062
2c61d211 5063 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
edd0c2cc
MC
5064 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5065 hwrm_ring_free_send_msg(bp, ring,
5066 RING_FREE_REQ_RING_TYPE_TX,
5067 close_path ? cmpl_ring_id :
5068 INVALID_HW_RING_ID);
5069 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5070 }
5071 }
5072
edd0c2cc 5073 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5074 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5075 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3 5076 u32 grp_idx = rxr->bnapi->index;
2c61d211 5077 u32 cmpl_ring_id;
edd0c2cc 5078
2c61d211 5079 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
edd0c2cc
MC
5080 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5081 hwrm_ring_free_send_msg(bp, ring,
5082 RING_FREE_REQ_RING_TYPE_RX,
5083 close_path ? cmpl_ring_id :
5084 INVALID_HW_RING_ID);
5085 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5086 bp->grp_info[grp_idx].rx_fw_ring_id =
5087 INVALID_HW_RING_ID;
c0c050c5
MC
5088 }
5089 }
5090
23aefdd7
MC
5091 if (bp->flags & BNXT_FLAG_CHIP_P5)
5092 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5093 else
5094 type = RING_FREE_REQ_RING_TYPE_RX;
edd0c2cc 5095 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 5096 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 5097 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
b81a90d3 5098 u32 grp_idx = rxr->bnapi->index;
2c61d211 5099 u32 cmpl_ring_id;
edd0c2cc 5100
2c61d211 5101 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
edd0c2cc 5102 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
23aefdd7 5103 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5104 close_path ? cmpl_ring_id :
5105 INVALID_HW_RING_ID);
5106 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
5107 bp->grp_info[grp_idx].agg_fw_ring_id =
5108 INVALID_HW_RING_ID;
c0c050c5
MC
5109 }
5110 }
5111
9d8bc097
MC
5112 /* The completion rings are about to be freed. After that the
5113 * IRQ doorbell will not work anymore. So we need to disable
5114 * IRQ here.
5115 */
5116 bnxt_disable_int_sync(bp);
5117
23aefdd7
MC
5118 if (bp->flags & BNXT_FLAG_CHIP_P5)
5119 type = RING_FREE_REQ_RING_TYPE_NQ;
5120 else
5121 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
edd0c2cc
MC
5122 for (i = 0; i < bp->cp_nr_rings; i++) {
5123 struct bnxt_napi *bnapi = bp->bnapi[i];
5124 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3e08b184
MC
5125 struct bnxt_ring_struct *ring;
5126 int j;
edd0c2cc 5127
3e08b184
MC
5128 for (j = 0; j < 2; j++) {
5129 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5130
5131 if (cpr2) {
5132 ring = &cpr2->cp_ring_struct;
5133 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5134 continue;
5135 hwrm_ring_free_send_msg(bp, ring,
5136 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5137 INVALID_HW_RING_ID);
5138 ring->fw_ring_id = INVALID_HW_RING_ID;
5139 }
5140 }
5141 ring = &cpr->cp_ring_struct;
edd0c2cc 5142 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
23aefdd7 5143 hwrm_ring_free_send_msg(bp, ring, type,
edd0c2cc
MC
5144 INVALID_HW_RING_ID);
5145 ring->fw_ring_id = INVALID_HW_RING_ID;
5146 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
5147 }
5148 }
c0c050c5
MC
5149}
5150
41e8d798
MC
5151static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5152 bool shared);
5153
674f50a5
MC
5154static int bnxt_hwrm_get_rings(struct bnxt *bp)
5155{
5156 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5157 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5158 struct hwrm_func_qcfg_input req = {0};
5159 int rc;
5160
5161 if (bp->hwrm_spec_code < 0x10601)
5162 return 0;
5163
5164 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5165 req.fid = cpu_to_le16(0xffff);
5166 mutex_lock(&bp->hwrm_cmd_lock);
5167 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5168 if (rc) {
5169 mutex_unlock(&bp->hwrm_cmd_lock);
5170 return -EIO;
5171 }
5172
5173 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
f1ca94de 5174 if (BNXT_NEW_RM(bp)) {
674f50a5
MC
5175 u16 cp, stats;
5176
5177 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5178 hw_resc->resv_hw_ring_grps =
5179 le32_to_cpu(resp->alloc_hw_ring_grps);
5180 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5181 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5182 stats = le16_to_cpu(resp->alloc_stat_ctx);
75720e63 5183 hw_resc->resv_irqs = cp;
41e8d798
MC
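 /* On P5 chips the completion rings are a shared pool for RX and TX;
 * if fewer were reserved than rx + tx, trim the ring counts to fit.
 * resv_rx_rings also counts the AGG rings when aggregation is
 * enabled, hence the >>= 1 / <<= 1 around the trim.
 */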
5184 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5185 int rx = hw_resc->resv_rx_rings;
5186 int tx = hw_resc->resv_tx_rings;
5187
5188 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5189 rx >>= 1;
5190 if (cp < (rx + tx)) {
5191 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5192 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5193 rx <<= 1;
5194 hw_resc->resv_rx_rings = rx;
5195 hw_resc->resv_tx_rings = tx;
5196 }
75720e63 5197 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
41e8d798
MC
5198 hw_resc->resv_hw_ring_grps = rx;
5199 }
674f50a5 5200 hw_resc->resv_cp_rings = cp;
780baad4 5201 hw_resc->resv_stat_ctxs = stats;
674f50a5
MC
5202 }
5203 mutex_unlock(&bp->hwrm_cmd_lock);
5204 return 0;
5205}
5206
391be5c2
MC
5207/* Caller must hold bp->hwrm_cmd_lock */
5208int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5209{
5210 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5211 struct hwrm_func_qcfg_input req = {0};
5212 int rc;
5213
5214 if (bp->hwrm_spec_code < 0x10601)
5215 return 0;
5216
5217 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5218 req.fid = cpu_to_le16(fid);
5219 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5220 if (!rc)
5221 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5222
5223 return rc;
5224}
5225
41e8d798
MC
5226static bool bnxt_rfs_supported(struct bnxt *bp);
5227
4ed50ef4
MC
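/* Build the FUNC_CFG (PF) or FUNC_VF_CFG (VF) request that reserves the
 * requested number of rings and related resources with the firmware
 * resource manager.  Only non-zero counts are flagged in req->enables.
 * On P5 chips the completion ring count is tx + ring_grps (one per TX and
 * RX ring) and one RSS context is needed per 64 ring groups.
 */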
5228static void
5229__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5230 int tx_rings, int rx_rings, int ring_grps,
780baad4 5231 int cp_rings, int stats, int vnics)
391be5c2 5232{
674f50a5 5233 u32 enables = 0;
391be5c2 5234
4ed50ef4
MC
5235 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5236 req->fid = cpu_to_le16(0xffff);
674f50a5 5237 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4ed50ef4 5238 req->num_tx_rings = cpu_to_le16(tx_rings);
f1ca94de 5239 if (BNXT_NEW_RM(bp)) {
674f50a5 5240 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
41e8d798
MC
5241 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5242 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5243 enables |= tx_rings + ring_grps ?
5244 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5245 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5246 enables |= rx_rings ?
5247 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5248 } else {
5249 enables |= cp_rings ?
5250 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5251 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5252 enables |= ring_grps ?
5253 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5254 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5255 }
dbe80d44 5256 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
674f50a5 5257
4ed50ef4 5258 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
5259 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5260 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5261 req->num_msix = cpu_to_le16(cp_rings);
5262 req->num_rsscos_ctxs =
5263 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5264 } else {
5265 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5266 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5267 req->num_rsscos_ctxs = cpu_to_le16(1);
5268 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5269 bnxt_rfs_supported(bp))
5270 req->num_rsscos_ctxs =
5271 cpu_to_le16(ring_grps + 1);
5272 }
780baad4 5273 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4 5274 req->num_vnics = cpu_to_le16(vnics);
674f50a5 5275 }
4ed50ef4
MC
5276 req->enables = cpu_to_le32(enables);
5277}
5278
5279static void
5280__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5281 struct hwrm_func_vf_cfg_input *req, int tx_rings,
5282 int rx_rings, int ring_grps, int cp_rings,
780baad4 5283 int stats, int vnics)
4ed50ef4
MC
5284{
5285 u32 enables = 0;
5286
5287 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5288 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
41e8d798
MC
5289 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5290 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5291 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5292 enables |= tx_rings + ring_grps ?
5293 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5294 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5295 } else {
5296 enables |= cp_rings ?
5297 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5298 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5299 enables |= ring_grps ?
5300 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5301 }
4ed50ef4 5302 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
41e8d798 5303 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
4ed50ef4 5304
41e8d798 5305 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
4ed50ef4
MC
5306 req->num_tx_rings = cpu_to_le16(tx_rings);
5307 req->num_rx_rings = cpu_to_le16(rx_rings);
41e8d798
MC
5308 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5309 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5310 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5311 } else {
5312 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5313 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5314 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5315 }
780baad4 5316 req->num_stat_ctxs = cpu_to_le16(stats);
4ed50ef4
MC
5317 req->num_vnics = cpu_to_le16(vnics);
5318
5319 req->enables = cpu_to_le32(enables);
5320}
5321
5322static int
5323bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 5324 int ring_grps, int cp_rings, int stats, int vnics)
4ed50ef4
MC
5325{
5326 struct hwrm_func_cfg_input req = {0};
5327 int rc;
5328
5329 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5330 cp_rings, stats, vnics);
4ed50ef4 5331 if (!req.enables)
391be5c2
MC
5332 return 0;
5333
674f50a5
MC
5334 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5335 if (rc)
5336 return -ENOMEM;
5337
5338 if (bp->hwrm_spec_code < 0x10601)
5339 bp->hw_resc.resv_tx_rings = tx_rings;
5340
5341 rc = bnxt_hwrm_get_rings(bp);
5342 return rc;
5343}
5344
5345static int
5346bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4 5347 int ring_grps, int cp_rings, int stats, int vnics)
674f50a5
MC
5348{
5349 struct hwrm_func_vf_cfg_input req = {0};
674f50a5
MC
5350 int rc;
5351
f1ca94de 5352 if (!BNXT_NEW_RM(bp)) {
674f50a5 5353 bp->hw_resc.resv_tx_rings = tx_rings;
391be5c2 5354 return 0;
674f50a5 5355 }
391be5c2 5356
4ed50ef4 5357 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5358 cp_rings, stats, vnics);
391be5c2 5359 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
674f50a5
MC
5360 if (rc)
5361 return -ENOMEM;
5362
5363 rc = bnxt_hwrm_get_rings(bp);
5364 return rc;
5365}
5366
5367static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
780baad4 5368 int cp, int stat, int vnic)
674f50a5
MC
5369{
5370 if (BNXT_PF(bp))
780baad4
VV
5371 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5372 vnic);
674f50a5 5373 else
780baad4
VV
5374 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5375 vnic);
674f50a5
MC
5376}
5377
b16b6891 5378int bnxt_nq_rings_in_use(struct bnxt *bp)
08654eb2
MC
5379{
5380 int cp = bp->cp_nr_rings;
5381 int ulp_msix, ulp_base;
5382
5383 ulp_msix = bnxt_get_ulp_msix_num(bp);
5384 if (ulp_msix) {
5385 ulp_base = bnxt_get_ulp_msix_base(bp);
5386 cp += ulp_msix;
5387 if ((ulp_base + ulp_msix) > cp)
5388 cp = ulp_base + ulp_msix;
5389 }
5390 return cp;
5391}
5392
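/* Illustrative walk-through of the accounting above (numbers are only an
 * example): with cp_nr_rings = 8, bnxt_get_ulp_msix_num() = 2 and
 * bnxt_get_ulp_msix_base() = 8, the ULP vectors sit right after the L2
 * rings and cp becomes 8 + 2 = 10.  If the ULP base were instead 12, the
 * function returns 12 + 2 = 14 so the reservation also covers the gap
 * between the two ranges.
 */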
c0b8cda0
MC
5393static int bnxt_cp_rings_in_use(struct bnxt *bp)
5394{
5395 int cp;
5396
5397 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5398 return bnxt_nq_rings_in_use(bp);
5399
5400 cp = bp->tx_nr_rings + bp->rx_nr_rings;
5401 return cp;
5402}
5403
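/* On P5 chips every RX and TX ring gets its own hardware completion ring,
 * so the completion-ring count here is simply rx + tx; earlier chips fall
 * back to the NQ/MSI-X based count from bnxt_nq_rings_in_use().
 */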
780baad4
VV
5404static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5405{
5406 return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
5407}
5408
4e41dc5d
MC
5409static bool bnxt_need_reserve_rings(struct bnxt *bp)
5410{
5411 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
fbcfc8e4 5412 int cp = bnxt_cp_rings_in_use(bp);
c0b8cda0 5413 int nq = bnxt_nq_rings_in_use(bp);
780baad4 5414 int rx = bp->rx_nr_rings, stat;
4e41dc5d
MC
5415 int vnic = 1, grp = rx;
5416
5417 if (bp->hwrm_spec_code < 0x10601)
5418 return false;
5419
5420 if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5421 return true;
5422
41e8d798 5423 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
4e41dc5d
MC
5424 vnic = rx + 1;
5425 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5426 rx <<= 1;
780baad4 5427 stat = bnxt_get_func_stat_ctxs(bp);
f1ca94de 5428 if (BNXT_NEW_RM(bp) &&
4e41dc5d 5429 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
c0b8cda0 5430 hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
780baad4 5431 hw_resc->resv_stat_ctxs != stat ||
41e8d798
MC
5432 (hw_resc->resv_hw_ring_grps != grp &&
5433 !(bp->flags & BNXT_FLAG_CHIP_P5))))
4e41dc5d
MC
5434 return true;
5435 return false;
5436}
5437
674f50a5
MC
5438static int __bnxt_reserve_rings(struct bnxt *bp)
5439{
5440 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
c0b8cda0 5441 int cp = bnxt_nq_rings_in_use(bp);
674f50a5
MC
5442 int tx = bp->tx_nr_rings;
5443 int rx = bp->rx_nr_rings;
674f50a5 5444 int grp, rx_rings, rc;
780baad4 5445 int vnic = 1, stat;
674f50a5 5446 bool sh = false;
674f50a5 5447
4e41dc5d 5448 if (!bnxt_need_reserve_rings(bp))
674f50a5
MC
5449 return 0;
5450
5451 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5452 sh = true;
41e8d798 5453 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
674f50a5
MC
5454 vnic = rx + 1;
5455 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5456 rx <<= 1;
674f50a5 5457 grp = bp->rx_nr_rings;
780baad4 5458 stat = bnxt_get_func_stat_ctxs(bp);
674f50a5 5459
780baad4 5460 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
391be5c2
MC
5461 if (rc)
5462 return rc;
5463
674f50a5 5464 tx = hw_resc->resv_tx_rings;
f1ca94de 5465 if (BNXT_NEW_RM(bp)) {
674f50a5 5466 rx = hw_resc->resv_rx_rings;
c0b8cda0 5467 cp = hw_resc->resv_irqs;
674f50a5
MC
5468 grp = hw_resc->resv_hw_ring_grps;
5469 vnic = hw_resc->resv_vnics;
780baad4 5470 stat = hw_resc->resv_stat_ctxs;
674f50a5
MC
5471 }
5472
5473 rx_rings = rx;
5474 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5475 if (rx >= 2) {
5476 rx_rings = rx >> 1;
5477 } else {
5478 if (netif_running(bp->dev))
5479 return -ENOMEM;
5480
5481 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
5482 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
5483 bp->dev->hw_features &= ~NETIF_F_LRO;
5484 bp->dev->features &= ~NETIF_F_LRO;
5485 bnxt_set_ring_params(bp);
5486 }
5487 }
5488 rx_rings = min_t(int, rx_rings, grp);
780baad4
VV
5489 cp = min_t(int, cp, bp->cp_nr_rings);
5490 if (stat > bnxt_get_ulp_stat_ctxs(bp))
5491 stat -= bnxt_get_ulp_stat_ctxs(bp);
5492 cp = min_t(int, cp, stat);
674f50a5
MC
5493 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
5494 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5495 rx = rx_rings << 1;
5496 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
5497 bp->tx_nr_rings = tx;
5498 bp->rx_nr_rings = rx_rings;
5499 bp->cp_nr_rings = cp;
5500
780baad4 5501 if (!tx || !rx || !cp || !grp || !vnic || !stat)
674f50a5
MC
5502 return -ENOMEM;
5503
391be5c2
MC
5504 return rc;
5505}
5506
8f23d638 5507static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
5508 int ring_grps, int cp_rings, int stats,
5509 int vnics)
98fdbe73 5510{
8f23d638 5511 struct hwrm_func_vf_cfg_input req = {0};
6fc2ffdf 5512 u32 flags;
98fdbe73
MC
5513 int rc;
5514
f1ca94de 5515 if (!BNXT_NEW_RM(bp))
98fdbe73
MC
5516 return 0;
5517
6fc2ffdf 5518 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5519 cp_rings, stats, vnics);
8f23d638
MC
5520 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
5521 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5522 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638 5523 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
41e8d798
MC
5524 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
5525 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
5526 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5527 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
8f23d638
MC
5528
5529 req.flags = cpu_to_le32(flags);
8f23d638
MC
5530 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5531 if (rc)
5532 return -ENOMEM;
5533 return 0;
5534}
5535
5536static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
5537 int ring_grps, int cp_rings, int stats,
5538 int vnics)
8f23d638
MC
5539{
5540 struct hwrm_func_cfg_input req = {0};
6fc2ffdf 5541 u32 flags;
8f23d638 5542 int rc;
98fdbe73 5543
6fc2ffdf 5544 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
780baad4 5545 cp_rings, stats, vnics);
8f23d638 5546 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
41e8d798 5547 if (BNXT_NEW_RM(bp)) {
8f23d638
MC
5548 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5549 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
8f23d638
MC
5550 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5551 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
41e8d798
MC
5552 if (bp->flags & BNXT_FLAG_CHIP_P5)
5553 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
5554 else
5555 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5556 }
6fc2ffdf 5557
8f23d638 5558 req.flags = cpu_to_le32(flags);
98fdbe73
MC
5559 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5560 if (rc)
5561 return -ENOMEM;
5562 return 0;
5563}
5564
8f23d638 5565static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
780baad4
VV
5566 int ring_grps, int cp_rings, int stats,
5567 int vnics)
8f23d638
MC
5568{
5569 if (bp->hwrm_spec_code < 0x10801)
5570 return 0;
5571
5572 if (BNXT_PF(bp))
5573 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
780baad4
VV
5574 ring_grps, cp_rings, stats,
5575 vnics);
8f23d638
MC
5576
5577 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
780baad4 5578 cp_rings, stats, vnics);
8f23d638
MC
5579}
5580
74706afa
MC
5581static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
5582{
5583 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5584 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5585 struct hwrm_ring_aggint_qcaps_input req = {0};
5586 int rc;
5587
5588 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
5589 coal_cap->num_cmpl_dma_aggr_max = 63;
5590 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
5591 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
5592 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
5593 coal_cap->int_lat_tmr_min_max = 65535;
5594 coal_cap->int_lat_tmr_max_max = 65535;
5595 coal_cap->num_cmpl_aggr_int_max = 65535;
5596 coal_cap->timer_units = 80;
5597
5598 if (bp->hwrm_spec_code < 0x10902)
5599 return;
5600
5601 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
5602 mutex_lock(&bp->hwrm_cmd_lock);
5603 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5604 if (!rc) {
5605 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
58590c8d 5606 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
74706afa
MC
5607 coal_cap->num_cmpl_dma_aggr_max =
5608 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
5609 coal_cap->num_cmpl_dma_aggr_during_int_max =
5610 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
5611 coal_cap->cmpl_aggr_dma_tmr_max =
5612 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
5613 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
5614 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
5615 coal_cap->int_lat_tmr_min_max =
5616 le16_to_cpu(resp->int_lat_tmr_min_max);
5617 coal_cap->int_lat_tmr_max_max =
5618 le16_to_cpu(resp->int_lat_tmr_max_max);
5619 coal_cap->num_cmpl_aggr_int_max =
5620 le16_to_cpu(resp->num_cmpl_aggr_int_max);
5621 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
5622 }
5623 mutex_unlock(&bp->hwrm_cmd_lock);
5624}
5625
5626static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
5627{
5628 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5629
5630 return usec * 1000 / coal_cap->timer_units;
5631}
5632
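/* Worked example of the conversion above, assuming the default
 * coal_cap->timer_units of 80 set in bnxt_hwrm_coal_params_qcaps():
 * a 25 usec coalescing interval maps to 25 * 1000 / 80 = 312 timer ticks.
 */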
5633static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
5634 struct bnxt_coal *hw_coal,
bb053f52
MC
5635 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
5636{
74706afa
MC
5637 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5638 u32 cmpl_params = coal_cap->cmpl_params;
5639 u16 val, tmr, max, flags = 0;
f8503969
MC
5640
5641 max = hw_coal->bufs_per_record * 128;
5642 if (hw_coal->budget)
5643 max = hw_coal->bufs_per_record * hw_coal->budget;
74706afa 5644 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
f8503969
MC
5645
5646 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
5647 req->num_cmpl_aggr_int = cpu_to_le16(val);
b153cbc5 5648
74706afa 5649 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
f8503969
MC
5650 req->num_cmpl_dma_aggr = cpu_to_le16(val);
5651
74706afa
MC
5652 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
5653 coal_cap->num_cmpl_dma_aggr_during_int_max);
f8503969
MC
5654 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
5655
74706afa
MC
5656 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
5657 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
f8503969
MC
5658 req->int_lat_tmr_max = cpu_to_le16(tmr);
5659
5660 /* min timer set to 1/2 of interrupt timer */
74706afa
MC
5661 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
5662 val = tmr / 2;
5663 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
5664 req->int_lat_tmr_min = cpu_to_le16(val);
5665 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
5666 }
f8503969
MC
5667
5668 /* buf timer set to 1/4 of interrupt timer */
74706afa 5669 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
f8503969
MC
5670 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
5671
74706afa
MC
5672 if (cmpl_params &
5673 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
5674 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
5675 val = clamp_t(u16, tmr, 1,
5676 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
 5677 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
5678 req->enables |=
5679 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
5680 }
f8503969 5681
74706afa
MC
5682 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
5683 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
5684 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
5685 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
f8503969 5686 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
bb053f52 5687 req->flags = cpu_to_le16(flags);
74706afa 5688 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
bb053f52
MC
5689}
5690
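/* Sketch of the values derived above for hw_coal->coal_ticks = 24 usec,
 * again assuming the default timer_units of 80:
 * int_lat_tmr_max = 24 * 1000 / 80 = 300 ticks, int_lat_tmr_min = 150
 * (half the interrupt timer) and cmpl_aggr_dma_tmr = 75 (a quarter),
 * each clamped to the limits reported by HWRM_RING_AGGINT_QCAPS and only
 * programmed when the corresponding capability bit is set.
 */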
58590c8d
MC
5691/* Caller holds bp->hwrm_cmd_lock */
5692static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
5693 struct bnxt_coal *hw_coal)
5694{
5695 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5696 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5697 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5698 u32 nq_params = coal_cap->nq_params;
5699 u16 tmr;
5700
5701 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
5702 return 0;
5703
5704 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5705 -1, -1);
5706 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
5707 req.flags =
5708 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
5709
5710 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
5711 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
5712 req.int_lat_tmr_min = cpu_to_le16(tmr);
5713 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
5714 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5715}
5716
6a8788f2
AG
5717int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
5718{
5719 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
5720 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5721 struct bnxt_coal coal;
6a8788f2
AG
5722
 5723 /* Tick values in microseconds.
5724 * 1 coal_buf x bufs_per_record = 1 completion record.
5725 */
5726 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
5727
5728 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
5729 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
5730
5731 if (!bnapi->rx_ring)
5732 return -ENODEV;
5733
5734 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5735 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5736
74706afa 5737 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6a8788f2 5738
2c61d211 5739 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6a8788f2
AG
5740
5741 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
5742 HWRM_CMD_TIMEOUT);
5743}
5744
c0c050c5
MC
5745int bnxt_hwrm_set_coal(struct bnxt *bp)
5746{
5747 int i, rc = 0;
dfc9c94a
MC
5748 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
5749 req_tx = {0}, *req;
c0c050c5 5750
dfc9c94a
MC
5751 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5752 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5753 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
5754 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
c0c050c5 5755
74706afa
MC
5756 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
5757 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
c0c050c5
MC
5758
5759 mutex_lock(&bp->hwrm_cmd_lock);
5760 for (i = 0; i < bp->cp_nr_rings; i++) {
dfc9c94a 5761 struct bnxt_napi *bnapi = bp->bnapi[i];
58590c8d 5762 struct bnxt_coal *hw_coal;
2c61d211 5763 u16 ring_id;
c0c050c5 5764
dfc9c94a 5765 req = &req_rx;
2c61d211
MC
5766 if (!bnapi->rx_ring) {
5767 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
dfc9c94a 5768 req = &req_tx;
2c61d211
MC
5769 } else {
5770 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
5771 }
5772 req->ring_id = cpu_to_le16(ring_id);
dfc9c94a
MC
5773
5774 rc = _hwrm_send_message(bp, req, sizeof(*req),
c0c050c5
MC
5775 HWRM_CMD_TIMEOUT);
5776 if (rc)
5777 break;
58590c8d
MC
5778
5779 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5780 continue;
5781
5782 if (bnapi->rx_ring && bnapi->tx_ring) {
5783 req = &req_tx;
5784 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
5785 req->ring_id = cpu_to_le16(ring_id);
5786 rc = _hwrm_send_message(bp, req, sizeof(*req),
5787 HWRM_CMD_TIMEOUT);
5788 if (rc)
5789 break;
5790 }
5791 if (bnapi->rx_ring)
5792 hw_coal = &bp->rx_coal;
5793 else
5794 hw_coal = &bp->tx_coal;
5795 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
c0c050c5
MC
5796 }
5797 mutex_unlock(&bp->hwrm_cmd_lock);
5798 return rc;
5799}
5800
5801static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
5802{
5803 int rc = 0, i;
5804 struct hwrm_stat_ctx_free_input req = {0};
5805
5806 if (!bp->bnapi)
5807 return 0;
5808
3e8060fa
PS
5809 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5810 return 0;
5811
c0c050c5
MC
5812 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
5813
5814 mutex_lock(&bp->hwrm_cmd_lock);
5815 for (i = 0; i < bp->cp_nr_rings; i++) {
5816 struct bnxt_napi *bnapi = bp->bnapi[i];
5817 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5818
5819 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
5820 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
5821
5822 rc = _hwrm_send_message(bp, &req, sizeof(req),
5823 HWRM_CMD_TIMEOUT);
5824 if (rc)
5825 break;
5826
5827 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5828 }
5829 }
5830 mutex_unlock(&bp->hwrm_cmd_lock);
5831 return rc;
5832}
5833
5834static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
5835{
5836 int rc = 0, i;
5837 struct hwrm_stat_ctx_alloc_input req = {0};
5838 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5839
3e8060fa
PS
5840 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5841 return 0;
5842
c0c050c5
MC
5843 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
5844
51f30785 5845 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
c0c050c5
MC
5846
5847 mutex_lock(&bp->hwrm_cmd_lock);
5848 for (i = 0; i < bp->cp_nr_rings; i++) {
5849 struct bnxt_napi *bnapi = bp->bnapi[i];
5850 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5851
5852 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
5853
5854 rc = _hwrm_send_message(bp, &req, sizeof(req),
5855 HWRM_CMD_TIMEOUT);
5856 if (rc)
5857 break;
5858
5859 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
5860
5861 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
5862 }
5863 mutex_unlock(&bp->hwrm_cmd_lock);
89aa8445 5864 return rc;
c0c050c5
MC
5865}
5866
cf6645f8
MC
5867static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
5868{
5869 struct hwrm_func_qcfg_input req = {0};
567b2abe 5870 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9315edca 5871 u16 flags;
cf6645f8
MC
5872 int rc;
5873
5874 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5875 req.fid = cpu_to_le16(0xffff);
5876 mutex_lock(&bp->hwrm_cmd_lock);
5877 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5878 if (rc)
5879 goto func_qcfg_exit;
5880
5881#ifdef CONFIG_BNXT_SRIOV
5882 if (BNXT_VF(bp)) {
cf6645f8
MC
5883 struct bnxt_vf_info *vf = &bp->vf;
5884
5885 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
5886 }
5887#endif
9315edca
MC
5888 flags = le16_to_cpu(resp->flags);
5889 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
5890 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
97381a18 5891 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
9315edca 5892 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
97381a18 5893 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
9315edca
MC
5894 }
5895 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
5896 bp->flags |= BNXT_FLAG_MULTI_HOST;
bc39f885 5897
567b2abe
SB
5898 switch (resp->port_partition_type) {
5899 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
5900 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
5901 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
5902 bp->port_partition_type = resp->port_partition_type;
5903 break;
5904 }
32e8239c
MC
5905 if (bp->hwrm_spec_code < 0x10707 ||
5906 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
5907 bp->br_mode = BRIDGE_MODE_VEB;
5908 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
5909 bp->br_mode = BRIDGE_MODE_VEPA;
5910 else
5911 bp->br_mode = BRIDGE_MODE_UNDEF;
cf6645f8 5912
7eb9bb3a
MC
5913 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
5914 if (!bp->max_mtu)
5915 bp->max_mtu = BNXT_MAX_MTU;
5916
cf6645f8
MC
5917func_qcfg_exit:
5918 mutex_unlock(&bp->hwrm_cmd_lock);
5919 return rc;
5920}
5921
98f04cf0
MC
5922static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5923{
5924 struct hwrm_func_backing_store_qcaps_input req = {0};
5925 struct hwrm_func_backing_store_qcaps_output *resp =
5926 bp->hwrm_cmd_resp_addr;
5927 int rc;
5928
5929 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
5930 return 0;
5931
5932 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
5933 mutex_lock(&bp->hwrm_cmd_lock);
5934 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5935 if (!rc) {
5936 struct bnxt_ctx_pg_info *ctx_pg;
5937 struct bnxt_ctx_mem_info *ctx;
5938 int i;
5939
5940 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
5941 if (!ctx) {
5942 rc = -ENOMEM;
5943 goto ctx_err;
5944 }
5945 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
5946 if (!ctx_pg) {
5947 kfree(ctx);
5948 rc = -ENOMEM;
5949 goto ctx_err;
5950 }
5951 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
5952 ctx->tqm_mem[i] = ctx_pg;
5953
5954 bp->ctx = ctx;
5955 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
5956 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
5957 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
5958 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
5959 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
5960 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
5961 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
5962 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
5963 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
5964 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
5965 ctx->vnic_max_vnic_entries =
5966 le16_to_cpu(resp->vnic_max_vnic_entries);
5967 ctx->vnic_max_ring_table_entries =
5968 le16_to_cpu(resp->vnic_max_ring_table_entries);
5969 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
5970 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
5971 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
5972 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
5973 ctx->tqm_min_entries_per_ring =
5974 le32_to_cpu(resp->tqm_min_entries_per_ring);
5975 ctx->tqm_max_entries_per_ring =
5976 le32_to_cpu(resp->tqm_max_entries_per_ring);
5977 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
5978 if (!ctx->tqm_entries_multiple)
5979 ctx->tqm_entries_multiple = 1;
5980 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
5981 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
5982 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
5983 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
5984 } else {
5985 rc = 0;
5986 }
5987ctx_err:
5988 mutex_unlock(&bp->hwrm_cmd_lock);
5989 return rc;
5990}
5991
1b9394e5
MC
5992static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
5993 __le64 *pg_dir)
5994{
5995 u8 pg_size = 0;
5996
5997 if (BNXT_PAGE_SHIFT == 13)
5998 pg_size = 1 << 4;
5999 else if (BNXT_PAGE_SIZE == 16)
6000 pg_size = 2 << 4;
6001
6002 *pg_attr = pg_size;
6003 if (rmem->nr_pages > 1) {
6004 *pg_attr |= 1;
6005 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6006 } else {
6007 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6008 }
6009}
6010
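/* The attribute byte built above encodes the backing-store page size in
 * its upper nibble (0 for the default 4K pages, 1 when BNXT_PAGE_SHIFT is
 * 13, i.e. 8K pages) and sets bit 0 when more than one page is used, in
 * which case pg_dir carries the page-table address (rmem->pg_tbl_map)
 * rather than the address of the single data page.
 */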
6011#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6012 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6013 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6014 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6015 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6016 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6017
6018static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6019{
6020 struct hwrm_func_backing_store_cfg_input req = {0};
6021 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6022 struct bnxt_ctx_pg_info *ctx_pg;
6023 __le32 *num_entries;
6024 __le64 *pg_dir;
6025 u8 *pg_attr;
6026 int i, rc;
6027 u32 ena;
6028
6029 if (!ctx)
6030 return 0;
6031
6032 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6033 req.enables = cpu_to_le32(enables);
6034
6035 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6036 ctx_pg = &ctx->qp_mem;
6037 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6038 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6039 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6040 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6041 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6042 &req.qpc_pg_size_qpc_lvl,
6043 &req.qpc_page_dir);
6044 }
6045 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6046 ctx_pg = &ctx->srq_mem;
6047 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6048 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6049 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6050 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6051 &req.srq_pg_size_srq_lvl,
6052 &req.srq_page_dir);
6053 }
6054 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6055 ctx_pg = &ctx->cq_mem;
6056 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6057 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6058 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6059 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6060 &req.cq_page_dir);
6061 }
6062 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6063 ctx_pg = &ctx->vnic_mem;
6064 req.vnic_num_vnic_entries =
6065 cpu_to_le16(ctx->vnic_max_vnic_entries);
6066 req.vnic_num_ring_table_entries =
6067 cpu_to_le16(ctx->vnic_max_ring_table_entries);
6068 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6069 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6070 &req.vnic_pg_size_vnic_lvl,
6071 &req.vnic_page_dir);
6072 }
6073 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6074 ctx_pg = &ctx->stat_mem;
6075 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6076 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6077 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6078 &req.stat_pg_size_stat_lvl,
6079 &req.stat_page_dir);
6080 }
6081 for (i = 0, num_entries = &req.tqm_sp_num_entries,
6082 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6083 pg_dir = &req.tqm_sp_page_dir,
6084 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6085 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6086 if (!(enables & ena))
6087 continue;
6088
6089 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6090 ctx_pg = ctx->tqm_mem[i];
6091 *num_entries = cpu_to_le32(ctx_pg->entries);
6092 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6093 }
6094 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6095 if (rc)
6096 rc = -EIO;
6097 return rc;
6098}
6099
98f04cf0
MC
6100static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6101 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size)
6102{
6103 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6104
6105 if (!mem_size)
6106 return 0;
6107
6108 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6109 if (rmem->nr_pages > MAX_CTX_PAGES) {
6110 rmem->nr_pages = 0;
6111 return -EINVAL;
6112 }
6113 rmem->page_size = BNXT_PAGE_SIZE;
6114 rmem->pg_arr = ctx_pg->ctx_pg_arr;
6115 rmem->dma_arr = ctx_pg->ctx_dma_arr;
1b9394e5 6116 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
98f04cf0
MC
6117 return bnxt_alloc_ring(bp, rmem);
6118}
6119
6120static void bnxt_free_ctx_mem(struct bnxt *bp)
6121{
6122 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6123 int i;
6124
6125 if (!ctx)
6126 return;
6127
6128 if (ctx->tqm_mem[0]) {
6129 for (i = 0; i < bp->max_q + 1; i++)
6130 bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem);
6131 kfree(ctx->tqm_mem[0]);
6132 ctx->tqm_mem[0] = NULL;
6133 }
6134
6135 bnxt_free_ring(bp, &ctx->stat_mem.ring_mem);
6136 bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem);
6137 bnxt_free_ring(bp, &ctx->cq_mem.ring_mem);
6138 bnxt_free_ring(bp, &ctx->srq_mem.ring_mem);
6139 bnxt_free_ring(bp, &ctx->qp_mem.ring_mem);
6140 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6141}
6142
6143static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6144{
6145 struct bnxt_ctx_pg_info *ctx_pg;
6146 struct bnxt_ctx_mem_info *ctx;
1b9394e5 6147 u32 mem_size, ena, entries;
98f04cf0
MC
6148 int i, rc;
6149
6150 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6151 if (rc) {
6152 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6153 rc);
6154 return rc;
6155 }
6156 ctx = bp->ctx;
6157 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6158 return 0;
6159
6160 ctx_pg = &ctx->qp_mem;
6161 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
6162 mem_size = ctx->qp_entry_size * ctx_pg->entries;
6163 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
6164 if (rc)
6165 return rc;
6166
6167 ctx_pg = &ctx->srq_mem;
6168 ctx_pg->entries = ctx->srq_max_l2_entries;
6169 mem_size = ctx->srq_entry_size * ctx_pg->entries;
6170 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
6171 if (rc)
6172 return rc;
6173
6174 ctx_pg = &ctx->cq_mem;
6175 ctx_pg->entries = ctx->cq_max_l2_entries;
6176 mem_size = ctx->cq_entry_size * ctx_pg->entries;
6177 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
6178 if (rc)
6179 return rc;
6180
6181 ctx_pg = &ctx->vnic_mem;
6182 ctx_pg->entries = ctx->vnic_max_vnic_entries +
6183 ctx->vnic_max_ring_table_entries;
6184 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
6185 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
6186 if (rc)
6187 return rc;
6188
6189 ctx_pg = &ctx->stat_mem;
6190 ctx_pg->entries = ctx->stat_max_entries;
6191 mem_size = ctx->stat_entry_size * ctx_pg->entries;
6192 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
6193 if (rc)
6194 return rc;
6195
6196 entries = ctx->qp_max_l2_entries;
6197 entries = roundup(entries, ctx->tqm_entries_multiple);
6198 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6199 ctx->tqm_max_entries_per_ring);
1b9394e5 6200 for (i = 0, ena = 0; i < bp->max_q + 1; i++) {
98f04cf0
MC
6201 ctx_pg = ctx->tqm_mem[i];
6202 ctx_pg->entries = entries;
6203 mem_size = ctx->tqm_entry_size * entries;
6204 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
6205 if (rc)
6206 return rc;
1b9394e5 6207 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
98f04cf0 6208 }
1b9394e5
MC
6209 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6210 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6211 if (rc)
6212 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6213 rc);
6214 else
6215 ctx->flags |= BNXT_CTX_FLAG_INITED;
6216
98f04cf0
MC
6217 return 0;
6218}
6219
db4723b3 6220int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
be0dd9c4
MC
6221{
6222 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6223 struct hwrm_func_resource_qcaps_input req = {0};
6224 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6225 int rc;
6226
6227 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6228 req.fid = cpu_to_le16(0xffff);
6229
6230 mutex_lock(&bp->hwrm_cmd_lock);
351cbde9
JT
6231 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6232 HWRM_CMD_TIMEOUT);
be0dd9c4
MC
6233 if (rc) {
6234 rc = -EIO;
6235 goto hwrm_func_resc_qcaps_exit;
6236 }
6237
db4723b3
MC
6238 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6239 if (!all)
6240 goto hwrm_func_resc_qcaps_exit;
6241
be0dd9c4
MC
6242 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6243 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6244 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6245 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6246 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6247 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6248 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6249 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6250 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6251 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6252 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6253 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6254 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6255 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6256 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6257 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6258
9c1fabdf
MC
6259 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6260 u16 max_msix = le16_to_cpu(resp->max_msix);
6261
f7588cd8 6262 hw_resc->max_nqs = max_msix;
9c1fabdf
MC
6263 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6264 }
6265
4673d664
MC
6266 if (BNXT_PF(bp)) {
6267 struct bnxt_pf_info *pf = &bp->pf;
6268
6269 pf->vf_resv_strategy =
6270 le16_to_cpu(resp->vf_reservation_strategy);
bf82736d 6271 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
4673d664
MC
6272 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6273 }
be0dd9c4
MC
6274hwrm_func_resc_qcaps_exit:
6275 mutex_unlock(&bp->hwrm_cmd_lock);
6276 return rc;
6277}
6278
6279static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5
MC
6280{
6281 int rc = 0;
6282 struct hwrm_func_qcaps_input req = {0};
6283 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6a4f2947
MC
6284 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6285 u32 flags;
c0c050c5
MC
6286
6287 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6288 req.fid = cpu_to_le16(0xffff);
6289
6290 mutex_lock(&bp->hwrm_cmd_lock);
6291 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6292 if (rc)
6293 goto hwrm_func_qcaps_exit;
6294
6a4f2947
MC
6295 flags = le32_to_cpu(resp->flags);
6296 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
e4060d30 6297 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6a4f2947 6298 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
e4060d30
MC
6299 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6300
7cc5a20e 6301 bp->tx_push_thresh = 0;
6a4f2947 6302 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
7cc5a20e
MC
6303 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6304
6a4f2947
MC
6305 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6306 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6307 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6308 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6309 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6310 if (!hw_resc->max_hw_ring_grps)
6311 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6312 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6313 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6314 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6315
c0c050c5
MC
6316 if (BNXT_PF(bp)) {
6317 struct bnxt_pf_info *pf = &bp->pf;
6318
6319 pf->fw_fid = le16_to_cpu(resp->fid);
6320 pf->port_id = le16_to_cpu(resp->port_id);
87027db1 6321 bp->dev->dev_port = pf->port_id;
11f15ed3 6322 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
c0c050c5
MC
6323 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
6324 pf->max_vfs = le16_to_cpu(resp->max_vfs);
6325 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
6326 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
6327 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
6328 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
6329 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
6330 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
6a4f2947 6331 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
c1ef146a 6332 bp->flags |= BNXT_FLAG_WOL_CAP;
c0c050c5 6333 } else {
379a80a1 6334#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
6335 struct bnxt_vf_info *vf = &bp->vf;
6336
6337 vf->fw_fid = le16_to_cpu(resp->fid);
7cc5a20e 6338 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
379a80a1 6339#endif
c0c050c5
MC
6340 }
6341
c0c050c5
MC
6342hwrm_func_qcaps_exit:
6343 mutex_unlock(&bp->hwrm_cmd_lock);
6344 return rc;
6345}
6346
804fba4e
MC
6347static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
6348
be0dd9c4
MC
6349static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
6350{
6351 int rc;
6352
6353 rc = __bnxt_hwrm_func_qcaps(bp);
6354 if (rc)
6355 return rc;
804fba4e
MC
6356 rc = bnxt_hwrm_queue_qportcfg(bp);
6357 if (rc) {
6358 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
6359 return rc;
6360 }
be0dd9c4 6361 if (bp->hwrm_spec_code >= 0x10803) {
98f04cf0
MC
6362 rc = bnxt_alloc_ctx_mem(bp);
6363 if (rc)
6364 return rc;
db4723b3 6365 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
be0dd9c4 6366 if (!rc)
97381a18 6367 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
be0dd9c4
MC
6368 }
6369 return 0;
6370}
6371
c0c050c5
MC
6372static int bnxt_hwrm_func_reset(struct bnxt *bp)
6373{
6374 struct hwrm_func_reset_input req = {0};
6375
6376 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
6377 req.enables = 0;
6378
6379 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
6380}
6381
6382static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
6383{
6384 int rc = 0;
6385 struct hwrm_queue_qportcfg_input req = {0};
6386 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
aabfc016
MC
6387 u8 i, j, *qptr;
6388 bool no_rdma;
c0c050c5
MC
6389
6390 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
6391
6392 mutex_lock(&bp->hwrm_cmd_lock);
6393 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6394 if (rc)
6395 goto qportcfg_exit;
6396
6397 if (!resp->max_configurable_queues) {
6398 rc = -EINVAL;
6399 goto qportcfg_exit;
6400 }
6401 bp->max_tc = resp->max_configurable_queues;
87c374de 6402 bp->max_lltc = resp->max_configurable_lossless_queues;
c0c050c5
MC
6403 if (bp->max_tc > BNXT_MAX_QUEUE)
6404 bp->max_tc = BNXT_MAX_QUEUE;
6405
aabfc016
MC
6406 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
6407 qptr = &resp->queue_id0;
6408 for (i = 0, j = 0; i < bp->max_tc; i++) {
98f04cf0
MC
6409 bp->q_info[j].queue_id = *qptr;
6410 bp->q_ids[i] = *qptr++;
aabfc016
MC
6411 bp->q_info[j].queue_profile = *qptr++;
6412 bp->tc_to_qidx[j] = j;
6413 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
6414 (no_rdma && BNXT_PF(bp)))
6415 j++;
6416 }
98f04cf0 6417 bp->max_q = bp->max_tc;
aabfc016
MC
6418 bp->max_tc = max_t(u8, j, 1);
6419
441cabbb
MC
6420 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
6421 bp->max_tc = 1;
6422
87c374de
MC
6423 if (bp->max_lltc > bp->max_tc)
6424 bp->max_lltc = bp->max_tc;
6425
c0c050c5
MC
6426qportcfg_exit:
6427 mutex_unlock(&bp->hwrm_cmd_lock);
6428 return rc;
6429}
6430
6431static int bnxt_hwrm_ver_get(struct bnxt *bp)
6432{
6433 int rc;
6434 struct hwrm_ver_get_input req = {0};
6435 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
e605db80 6436 u32 dev_caps_cfg;
c0c050c5 6437
e6ef2699 6438 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
c0c050c5
MC
6439 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
6440 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
6441 req.hwrm_intf_min = HWRM_VERSION_MINOR;
6442 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
6443 mutex_lock(&bp->hwrm_cmd_lock);
6444 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6445 if (rc)
6446 goto hwrm_ver_get_exit;
6447
6448 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
6449
894aa69a
MC
6450 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
6451 resp->hwrm_intf_min_8b << 8 |
6452 resp->hwrm_intf_upd_8b;
6453 if (resp->hwrm_intf_maj_8b < 1) {
c193554e 6454 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
894aa69a
MC
6455 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
6456 resp->hwrm_intf_upd_8b);
c193554e 6457 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5 6458 }
431aa1eb 6459 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
894aa69a
MC
6460 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
6461 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
c0c050c5 6462
ff4fe81d
MC
6463 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
6464 if (!bp->hwrm_cmd_timeout)
6465 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
6466
1dfddc41 6467 if (resp->hwrm_intf_maj_8b >= 1) {
e6ef2699 6468 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
1dfddc41
MC
6469 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
6470 }
6471 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
6472 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
e6ef2699 6473
659c805c 6474 bp->chip_num = le16_to_cpu(resp->chip_num);
3e8060fa
PS
6475 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
6476 !resp->chip_metal)
6477 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
659c805c 6478
e605db80
DK
6479 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
6480 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
6481 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
97381a18 6482 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
e605db80 6483
c0c050c5
MC
6484hwrm_ver_get_exit:
6485 mutex_unlock(&bp->hwrm_cmd_lock);
6486 return rc;
6487}
6488
5ac67d8b
RS
6489int bnxt_hwrm_fw_set_time(struct bnxt *bp)
6490{
6491 struct hwrm_fw_set_time_input req = {0};
7dfaa7bc
AB
6492 struct tm tm;
6493 time64_t now = ktime_get_real_seconds();
5ac67d8b 6494
ca2c39e2
MC
6495 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
6496 bp->hwrm_spec_code < 0x10400)
5ac67d8b
RS
6497 return -EOPNOTSUPP;
6498
7dfaa7bc 6499 time64_to_tm(now, 0, &tm);
5ac67d8b
RS
6500 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
6501 req.year = cpu_to_le16(1900 + tm.tm_year);
6502 req.month = 1 + tm.tm_mon;
6503 req.day = tm.tm_mday;
6504 req.hour = tm.tm_hour;
6505 req.minute = tm.tm_min;
6506 req.second = tm.tm_sec;
6507 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6508}
6509
3bdf56c4
MC
6510static int bnxt_hwrm_port_qstats(struct bnxt *bp)
6511{
6512 int rc;
6513 struct bnxt_pf_info *pf = &bp->pf;
6514 struct hwrm_port_qstats_input req = {0};
6515
6516 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
6517 return 0;
6518
6519 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
6520 req.port_id = cpu_to_le16(pf->port_id);
6521 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
6522 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
6523 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6524 return rc;
6525}
6526
00db3cba
VV
6527static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
6528{
36e53349 6529 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
e37fed79 6530 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
00db3cba
VV
6531 struct hwrm_port_qstats_ext_input req = {0};
6532 struct bnxt_pf_info *pf = &bp->pf;
36e53349 6533 int rc;
00db3cba
VV
6534
6535 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
6536 return 0;
6537
6538 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
6539 req.port_id = cpu_to_le16(pf->port_id);
6540 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
6541 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
36e53349
MC
6542 req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
6543 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
6544 mutex_lock(&bp->hwrm_cmd_lock);
6545 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6546 if (!rc) {
6547 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
6548 bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
6549 } else {
6550 bp->fw_rx_stats_ext_size = 0;
6551 bp->fw_tx_stats_ext_size = 0;
6552 }
e37fed79
MC
6553 if (bp->fw_tx_stats_ext_size <=
6554 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
6555 mutex_unlock(&bp->hwrm_cmd_lock);
6556 bp->pri2cos_valid = 0;
6557 return rc;
6558 }
6559
6560 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
6561 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
6562
6563 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
6564 if (!rc) {
6565 struct hwrm_queue_pri2cos_qcfg_output *resp2;
6566 u8 *pri2cos;
6567 int i, j;
6568
6569 resp2 = bp->hwrm_cmd_resp_addr;
6570 pri2cos = &resp2->pri0_cos_queue_id;
6571 for (i = 0; i < 8; i++) {
6572 u8 queue_id = pri2cos[i];
6573
6574 for (j = 0; j < bp->max_q; j++) {
6575 if (bp->q_ids[j] == queue_id)
6576 bp->pri2cos[i] = j;
6577 }
6578 }
6579 bp->pri2cos_valid = 1;
6580 }
36e53349
MC
6581 mutex_unlock(&bp->hwrm_cmd_lock);
6582 return rc;
00db3cba
VV
6583}
6584
c0c050c5
MC
6585static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
6586{
6587 if (bp->vxlan_port_cnt) {
6588 bnxt_hwrm_tunnel_dst_port_free(
6589 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6590 }
6591 bp->vxlan_port_cnt = 0;
6592 if (bp->nge_port_cnt) {
6593 bnxt_hwrm_tunnel_dst_port_free(
6594 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6595 }
6596 bp->nge_port_cnt = 0;
6597}
6598
6599static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
6600{
6601 int rc, i;
6602 u32 tpa_flags = 0;
6603
6604 if (set_tpa)
6605 tpa_flags = bp->flags & BNXT_FLAG_TPA;
6606 for (i = 0; i < bp->nr_vnics; i++) {
6607 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
6608 if (rc) {
6609 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
23e12c89 6610 i, rc);
c0c050c5
MC
6611 return rc;
6612 }
6613 }
6614 return 0;
6615}
6616
6617static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
6618{
6619 int i;
6620
6621 for (i = 0; i < bp->nr_vnics; i++)
6622 bnxt_hwrm_vnic_set_rss(bp, i, false);
6623}
6624
6625static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
6626 bool irq_re_init)
6627{
6628 if (bp->vnic_info) {
6629 bnxt_hwrm_clear_vnic_filter(bp);
 6630 /* clear all RSS settings before freeing vnic ctx */
6631 bnxt_hwrm_clear_vnic_rss(bp);
6632 bnxt_hwrm_vnic_ctx_free(bp);
 6633 /* before freeing the vnic, undo the vnic tpa settings */
6634 if (bp->flags & BNXT_FLAG_TPA)
6635 bnxt_set_tpa(bp, false);
6636 bnxt_hwrm_vnic_free(bp);
6637 }
6638 bnxt_hwrm_ring_free(bp, close_path);
6639 bnxt_hwrm_ring_grp_free(bp);
6640 if (irq_re_init) {
6641 bnxt_hwrm_stat_ctx_free(bp);
6642 bnxt_hwrm_free_tunnel_ports(bp);
6643 }
6644}
6645
39d8ba2e
MC
6646static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
6647{
6648 struct hwrm_func_cfg_input req = {0};
6649 int rc;
6650
6651 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6652 req.fid = cpu_to_le16(0xffff);
6653 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
6654 if (br_mode == BRIDGE_MODE_VEB)
6655 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
6656 else if (br_mode == BRIDGE_MODE_VEPA)
6657 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
6658 else
6659 return -EINVAL;
6660 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6661 if (rc)
6662 rc = -EIO;
6663 return rc;
6664}
6665
c3480a60
MC
6666static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
6667{
6668 struct hwrm_func_cfg_input req = {0};
6669 int rc;
6670
6671 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
6672 return 0;
6673
6674 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6675 req.fid = cpu_to_le16(0xffff);
6676 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
d4f52de0 6677 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
c3480a60 6678 if (size == 128)
d4f52de0 6679 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
c3480a60
MC
6680
6681 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6682 if (rc)
6683 rc = -EIO;
6684 return rc;
6685}
6686
7b3af4f7 6687static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
c0c050c5 6688{
ae10ae74 6689 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
c0c050c5
MC
6690 int rc;
6691
ae10ae74
MC
6692 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
6693 goto skip_rss_ctx;
6694
c0c050c5 6695 /* allocate context for vnic */
94ce9caa 6696 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
c0c050c5
MC
6697 if (rc) {
6698 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
6699 vnic_id, rc);
6700 goto vnic_setup_err;
6701 }
6702 bp->rsscos_nr_ctxs++;
6703
94ce9caa
PS
6704 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6705 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
6706 if (rc) {
6707 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
6708 vnic_id, rc);
6709 goto vnic_setup_err;
6710 }
6711 bp->rsscos_nr_ctxs++;
6712 }
6713
ae10ae74 6714skip_rss_ctx:
c0c050c5
MC
6715 /* configure default vnic, ring grp */
6716 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6717 if (rc) {
6718 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6719 vnic_id, rc);
6720 goto vnic_setup_err;
6721 }
6722
6723 /* Enable RSS hashing on vnic */
6724 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
6725 if (rc) {
6726 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
6727 vnic_id, rc);
6728 goto vnic_setup_err;
6729 }
6730
6731 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6732 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6733 if (rc) {
6734 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6735 vnic_id, rc);
6736 }
6737 }
6738
6739vnic_setup_err:
6740 return rc;
6741}
6742
7b3af4f7
MC
6743static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
6744{
6745 int rc, i, nr_ctxs;
6746
6747 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
6748 for (i = 0; i < nr_ctxs; i++) {
6749 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
6750 if (rc) {
6751 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
6752 vnic_id, i, rc);
6753 break;
6754 }
6755 bp->rsscos_nr_ctxs++;
6756 }
6757 if (i < nr_ctxs)
6758 return -ENOMEM;
6759
6760 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
6761 if (rc) {
6762 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
6763 vnic_id, rc);
6764 return rc;
6765 }
6766 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6767 if (rc) {
6768 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6769 vnic_id, rc);
6770 return rc;
6771 }
6772 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6773 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6774 if (rc) {
6775 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6776 vnic_id, rc);
6777 }
6778 }
6779 return rc;
6780}
6781
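/* Illustration of the context math above: each RSS context on P5 covers up
 * to 64 RX rings, so e.g. 70 RX rings need DIV_ROUND_UP(70, 64) = 2
 * contexts, and all of them must be allocated before RSS can be configured.
 */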
6782static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
6783{
6784 if (bp->flags & BNXT_FLAG_CHIP_P5)
6785 return __bnxt_setup_vnic_p5(bp, vnic_id);
6786 else
6787 return __bnxt_setup_vnic(bp, vnic_id);
6788}
6789
c0c050c5
MC
6790static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
6791{
6792#ifdef CONFIG_RFS_ACCEL
6793 int i, rc = 0;
6794
6795 for (i = 0; i < bp->rx_nr_rings; i++) {
ae10ae74 6796 struct bnxt_vnic_info *vnic;
c0c050c5
MC
6797 u16 vnic_id = i + 1;
6798 u16 ring_id = i;
6799
6800 if (vnic_id >= bp->nr_vnics)
6801 break;
6802
ae10ae74
MC
6803 vnic = &bp->vnic_info[vnic_id];
6804 vnic->flags |= BNXT_VNIC_RFS_FLAG;
6805 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6806 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
b81a90d3 6807 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
c0c050c5
MC
6808 if (rc) {
6809 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
6810 vnic_id, rc);
6811 break;
6812 }
6813 rc = bnxt_setup_vnic(bp, vnic_id);
6814 if (rc)
6815 break;
6816 }
6817 return rc;
6818#else
6819 return 0;
6820#endif
6821}
6822
17c71ac3
MC
6823/* Allow PF and VF with default VLAN to be in promiscuous mode */
6824static bool bnxt_promisc_ok(struct bnxt *bp)
6825{
6826#ifdef CONFIG_BNXT_SRIOV
6827 if (BNXT_VF(bp) && !bp->vf.vlan)
6828 return false;
6829#endif
6830 return true;
6831}
6832
dc52c6c7
PS
6833static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
6834{
6835 unsigned int rc = 0;
6836
6837 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
6838 if (rc) {
6839 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
6840 rc);
6841 return rc;
6842 }
6843
6844 rc = bnxt_hwrm_vnic_cfg(bp, 1);
6845 if (rc) {
 6846 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
6847 rc);
6848 return rc;
6849 }
6850 return rc;
6851}
6852
b664f008 6853static int bnxt_cfg_rx_mode(struct bnxt *);
7d2837dd 6854static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
b664f008 6855
c0c050c5
MC
6856static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
6857{
7d2837dd 6858 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
c0c050c5 6859 int rc = 0;
76595193 6860 unsigned int rx_nr_rings = bp->rx_nr_rings;
c0c050c5
MC
6861
6862 if (irq_re_init) {
6863 rc = bnxt_hwrm_stat_ctx_alloc(bp);
6864 if (rc) {
6865 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
6866 rc);
6867 goto err_out;
6868 }
6869 }
6870
6871 rc = bnxt_hwrm_ring_alloc(bp);
6872 if (rc) {
6873 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
6874 goto err_out;
6875 }
6876
6877 rc = bnxt_hwrm_ring_grp_alloc(bp);
6878 if (rc) {
6879 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
6880 goto err_out;
6881 }
6882
76595193
PS
6883 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6884 rx_nr_rings--;
6885
c0c050c5 6886 /* default vnic 0 */
76595193 6887 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
c0c050c5
MC
6888 if (rc) {
6889 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
6890 goto err_out;
6891 }
6892
6893 rc = bnxt_setup_vnic(bp, 0);
6894 if (rc)
6895 goto err_out;
6896
6897 if (bp->flags & BNXT_FLAG_RFS) {
6898 rc = bnxt_alloc_rfs_vnics(bp);
6899 if (rc)
6900 goto err_out;
6901 }
6902
6903 if (bp->flags & BNXT_FLAG_TPA) {
6904 rc = bnxt_set_tpa(bp, true);
6905 if (rc)
6906 goto err_out;
6907 }
6908
6909 if (BNXT_VF(bp))
6910 bnxt_update_vf_mac(bp);
6911
6912 /* Filter for default vnic 0 */
6913 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
6914 if (rc) {
6915 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
6916 goto err_out;
6917 }
7d2837dd 6918 vnic->uc_filter_count = 1;
c0c050c5 6919
30e33848
MC
6920 vnic->rx_mask = 0;
6921 if (bp->dev->flags & IFF_BROADCAST)
6922 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5 6923
17c71ac3 6924 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7d2837dd
MC
6925 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6926
6927 if (bp->dev->flags & IFF_ALLMULTI) {
6928 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6929 vnic->mc_list_count = 0;
6930 } else {
6931 u32 mask = 0;
6932
6933 bnxt_mc_list_updated(bp, &mask);
6934 vnic->rx_mask |= mask;
6935 }
c0c050c5 6936
b664f008
MC
6937 rc = bnxt_cfg_rx_mode(bp);
6938 if (rc)
c0c050c5 6939 goto err_out;
c0c050c5
MC
6940
6941 rc = bnxt_hwrm_set_coal(bp);
6942 if (rc)
6943 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
dc52c6c7
PS
6944 rc);
6945
6946 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6947 rc = bnxt_setup_nitroa0_vnic(bp);
6948 if (rc)
6949 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
6950 rc);
6951 }
c0c050c5 6952
cf6645f8
MC
6953 if (BNXT_VF(bp)) {
6954 bnxt_hwrm_func_qcfg(bp);
6955 netdev_update_features(bp->dev);
6956 }
6957
c0c050c5
MC
6958 return 0;
6959
6960err_out:
6961 bnxt_hwrm_resource_free(bp, 0, true);
6962
6963 return rc;
6964}
6965
6966static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
6967{
6968 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
6969 return 0;
6970}
6971
6972static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
6973{
2247925f 6974 bnxt_init_cp_rings(bp);
c0c050c5
MC
6975 bnxt_init_rx_rings(bp);
6976 bnxt_init_tx_rings(bp);
6977 bnxt_init_ring_grps(bp, irq_re_init);
6978 bnxt_init_vnics(bp);
6979
6980 return bnxt_init_chip(bp, irq_re_init);
6981}
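/* A short descriptive note (inferred from the calls above): the software
 * state of the completion, RX and TX rings, ring groups and VNICs is set
 * up first, and bnxt_init_chip() then programs the firmware over HWRM
 * (stat contexts, rings, ring groups, VNICs, the default L2 filter and
 * the RX mask).
 */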
6982
c0c050c5
MC
6983static int bnxt_set_real_num_queues(struct bnxt *bp)
6984{
6985 int rc;
6986 struct net_device *dev = bp->dev;
6987
5f449249
MC
6988 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
6989 bp->tx_nr_rings_xdp);
c0c050c5
MC
6990 if (rc)
6991 return rc;
6992
6993 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
6994 if (rc)
6995 return rc;
6996
6997#ifdef CONFIG_RFS_ACCEL
45019a18 6998 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 6999 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
7000#endif
7001
7002 return rc;
7003}
7004
6e6c5a57
MC
7005static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7006 bool shared)
7007{
7008 int _rx = *rx, _tx = *tx;
7009
7010 if (shared) {
7011 *rx = min_t(int, _rx, max);
7012 *tx = min_t(int, _tx, max);
7013 } else {
7014 if (max < 2)
7015 return -ENOMEM;
7016
7017 while (_rx + _tx > max) {
7018 if (_rx > _tx && _rx > 1)
7019 _rx--;
7020 else if (_tx > 1)
7021 _tx--;
7022 }
7023 *rx = _rx;
7024 *tx = _tx;
7025 }
7026 return 0;
7027}
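/* Illustrative example (not part of the driver): with max = 9 and
 * shared == false, starting from *rx = 8 and *tx = 8 the loop above
 * decrements the larger of the two counts (tx when they are equal) until
 * rx + tx <= max, leaving *rx = 5 and *tx = 4.  In the shared case both
 * counts are simply clamped to max.
 */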
7028
7809592d
MC
7029static void bnxt_setup_msix(struct bnxt *bp)
7030{
7031 const int len = sizeof(bp->irq_tbl[0].name);
7032 struct net_device *dev = bp->dev;
7033 int tcs, i;
7034
7035 tcs = netdev_get_num_tc(dev);
7036 if (tcs > 1) {
d1e7925e 7037 int i, off, count;
7809592d 7038
d1e7925e
MC
7039 for (i = 0; i < tcs; i++) {
7040 count = bp->tx_nr_rings_per_tc;
7041 off = i * count;
7042 netdev_set_tc_queue(dev, i, count, off);
7809592d
MC
7043 }
7044 }
7045
7046 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c 7047 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7809592d
MC
7048 char *attr;
7049
7050 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7051 attr = "TxRx";
7052 else if (i < bp->rx_nr_rings)
7053 attr = "rx";
7054 else
7055 attr = "tx";
7056
e5811b8c
MC
7057 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7058 attr, i);
7059 bp->irq_tbl[map_idx].handler = bnxt_msix;
7809592d
MC
7060 }
7061}
7062
7063static void bnxt_setup_inta(struct bnxt *bp)
7064{
7065 const int len = sizeof(bp->irq_tbl[0].name);
7066
7067 if (netdev_get_num_tc(bp->dev))
7068 netdev_reset_tc(bp->dev);
7069
7070 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7071 0);
7072 bp->irq_tbl[0].handler = bnxt_inta;
7073}
7074
7075static int bnxt_setup_int_mode(struct bnxt *bp)
7076{
7077 int rc;
7078
7079 if (bp->flags & BNXT_FLAG_USING_MSIX)
7080 bnxt_setup_msix(bp);
7081 else
7082 bnxt_setup_inta(bp);
7083
7084 rc = bnxt_set_real_num_queues(bp);
7085 return rc;
7086}
7087
b7429954 7088#ifdef CONFIG_RFS_ACCEL
8079e8f1
MC
7089static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7090{
6a4f2947 7091 return bp->hw_resc.max_rsscos_ctxs;
8079e8f1
MC
7092}
7093
7094static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7095{
6a4f2947 7096 return bp->hw_resc.max_vnics;
8079e8f1 7097}
b7429954 7098#endif
8079e8f1 7099
e4060d30
MC
7100unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7101{
6a4f2947 7102 return bp->hw_resc.max_stat_ctxs;
e4060d30
MC
7103}
7104
7105unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7106{
6a4f2947 7107 return bp->hw_resc.max_cp_rings;
e4060d30
MC
7108}
7109
e916b081 7110static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
a588e458 7111{
c0b8cda0
MC
7112 unsigned int cp = bp->hw_resc.max_cp_rings;
7113
7114 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7115 cp -= bnxt_get_ulp_msix_num(bp);
7116
7117 return cp;
a588e458
MC
7118}
7119
ad95c27b 7120static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7809592d 7121{
6a4f2947
MC
7122 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7123
f7588cd8
MC
7124 if (bp->flags & BNXT_FLAG_CHIP_P5)
7125 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7126
6a4f2947 7127 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7809592d
MC
7128}
7129
30f52947 7130static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
33c2657e 7131{
6a4f2947 7132 bp->hw_resc.max_irqs = max_irqs;
33c2657e
MC
7133}
7134
e916b081
MC
7135unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7136{
7137 unsigned int cp;
7138
7139 cp = bnxt_get_max_func_cp_rings_for_en(bp);
7140 if (bp->flags & BNXT_FLAG_CHIP_P5)
7141 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
7142 else
7143 return cp - bp->cp_nr_rings;
7144}
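/* Descriptive note (inferred from the code above): on P5 chips every RX
 * and TX ring uses its own completion ring, so the rings already in use
 * are rx_nr_rings + tx_nr_rings; on earlier chips one completion ring per
 * NAPI instance suffices, so cp_nr_rings is subtracted instead.
 */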
7145
c027c6b4
VV
7146unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
7147{
7148 unsigned int stat;
7149
7150 stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
7151 stat -= bp->cp_nr_rings;
7152 return stat;
7153}
7154
fbcfc8e4
MC
7155int bnxt_get_avail_msix(struct bnxt *bp, int num)
7156{
7157 int max_cp = bnxt_get_max_func_cp_rings(bp);
7158 int max_irq = bnxt_get_max_func_irqs(bp);
7159 int total_req = bp->cp_nr_rings + num;
7160 int max_idx, avail_msix;
7161
75720e63
MC
7162 max_idx = bp->total_irqs;
7163 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7164 max_idx = min_t(int, bp->total_irqs, max_cp);
fbcfc8e4 7165 avail_msix = max_idx - bp->cp_nr_rings;
f1ca94de 7166 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
fbcfc8e4
MC
7167 return avail_msix;
7168
7169 if (max_irq < total_req) {
7170 num = max_irq - bp->cp_nr_rings;
7171 if (num <= 0)
7172 return 0;
7173 }
7174 return num;
7175}
7176
08654eb2
MC
7177static int bnxt_get_num_msix(struct bnxt *bp)
7178{
f1ca94de 7179 if (!BNXT_NEW_RM(bp))
08654eb2
MC
7180 return bnxt_get_max_func_irqs(bp);
7181
c0b8cda0 7182 return bnxt_nq_rings_in_use(bp);
08654eb2
MC
7183}
7184
7809592d 7185static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 7186{
fbcfc8e4 7187 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7809592d 7188 struct msix_entry *msix_ent;
c0c050c5 7189
08654eb2
MC
7190 total_vecs = bnxt_get_num_msix(bp);
7191 max = bnxt_get_max_func_irqs(bp);
7192 if (total_vecs > max)
7193 total_vecs = max;
7194
2773dfb2
MC
7195 if (!total_vecs)
7196 return 0;
7197
c0c050c5
MC
7198 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
7199 if (!msix_ent)
7200 return -ENOMEM;
7201
7202 for (i = 0; i < total_vecs; i++) {
7203 msix_ent[i].entry = i;
7204 msix_ent[i].vector = 0;
7205 }
7206
01657bcd
MC
7207 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
7208 min = 2;
7209
7210 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
fbcfc8e4
MC
7211 ulp_msix = bnxt_get_ulp_msix_num(bp);
7212 if (total_vecs < 0 || total_vecs < ulp_msix) {
c0c050c5
MC
7213 rc = -ENODEV;
7214 goto msix_setup_exit;
7215 }
7216
7217 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
7218 if (bp->irq_tbl) {
7809592d
MC
7219 for (i = 0; i < total_vecs; i++)
7220 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 7221
7809592d 7222 bp->total_irqs = total_vecs;
c0c050c5 7223 /* Trim rings based upon num of vectors allocated */
6e6c5a57 7224 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
fbcfc8e4 7225 total_vecs - ulp_msix, min == 1);
6e6c5a57
MC
7226 if (rc)
7227 goto msix_setup_exit;
7228
7809592d
MC
7229 bp->cp_nr_rings = (min == 1) ?
7230 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7231 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 7232
c0c050c5
MC
7233 } else {
7234 rc = -ENOMEM;
7235 goto msix_setup_exit;
7236 }
7237 bp->flags |= BNXT_FLAG_USING_MSIX;
7238 kfree(msix_ent);
7239 return 0;
7240
7241msix_setup_exit:
7809592d
MC
7242 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
7243 kfree(bp->irq_tbl);
7244 bp->irq_tbl = NULL;
c0c050c5
MC
7245 pci_disable_msix(bp->pdev);
7246 kfree(msix_ent);
7247 return rc;
7248}
7249
7809592d 7250static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 7251{
c0c050c5 7252 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
7253 if (!bp->irq_tbl)
7254 return -ENOMEM;
7255
7256 bp->total_irqs = 1;
c0c050c5
MC
7257 bp->rx_nr_rings = 1;
7258 bp->tx_nr_rings = 1;
7259 bp->cp_nr_rings = 1;
01657bcd 7260 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 7261 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 7262 return 0;
c0c050c5
MC
7263}
7264
7809592d 7265static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5
MC
7266{
7267 int rc = 0;
7268
7269 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 7270 rc = bnxt_init_msix(bp);
c0c050c5 7271
1fa72e29 7272 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 7273 /* fallback to INTA */
7809592d 7274 rc = bnxt_init_inta(bp);
c0c050c5
MC
7275 }
7276 return rc;
7277}
7278
7809592d
MC
7279static void bnxt_clear_int_mode(struct bnxt *bp)
7280{
7281 if (bp->flags & BNXT_FLAG_USING_MSIX)
7282 pci_disable_msix(bp->pdev);
7283
7284 kfree(bp->irq_tbl);
7285 bp->irq_tbl = NULL;
7286 bp->flags &= ~BNXT_FLAG_USING_MSIX;
7287}
7288
fbcfc8e4 7289int bnxt_reserve_rings(struct bnxt *bp)
674f50a5 7290{
674f50a5 7291 int tcs = netdev_get_num_tc(bp->dev);
36d65be9 7292 bool reinit_irq = false;
674f50a5
MC
7293 int rc;
7294
7295 if (!bnxt_need_reserve_rings(bp))
7296 return 0;
7297
f1ca94de 7298 if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
ec86f14e 7299 bnxt_ulp_irq_stop(bp);
674f50a5 7300 bnxt_clear_int_mode(bp);
36d65be9
MC
7301 reinit_irq = true;
7302 }
7303 rc = __bnxt_reserve_rings(bp);
7304 if (reinit_irq) {
7305 if (!rc)
7306 rc = bnxt_init_int_mode(bp);
ec86f14e 7307 bnxt_ulp_irq_restart(bp, rc);
36d65be9
MC
7308 }
7309 if (rc) {
7310 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
7311 return rc;
674f50a5
MC
7312 }
7313 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
7314 netdev_err(bp->dev, "tx ring reservation failure\n");
7315 netdev_reset_tc(bp->dev);
7316 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
7317 return -ENOMEM;
7318 }
674f50a5
MC
7319 return 0;
7320}
7321
c0c050c5
MC
7322static void bnxt_free_irq(struct bnxt *bp)
7323{
7324 struct bnxt_irq *irq;
7325 int i;
7326
7327#ifdef CONFIG_RFS_ACCEL
7328 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
7329 bp->dev->rx_cpu_rmap = NULL;
7330#endif
cb98526b 7331 if (!bp->irq_tbl || !bp->bnapi)
c0c050c5
MC
7332 return;
7333
7334 for (i = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
7335 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7336
7337 irq = &bp->irq_tbl[map_idx];
56f0fd80
VV
7338 if (irq->requested) {
7339 if (irq->have_cpumask) {
7340 irq_set_affinity_hint(irq->vector, NULL);
7341 free_cpumask_var(irq->cpu_mask);
7342 irq->have_cpumask = 0;
7343 }
c0c050c5 7344 free_irq(irq->vector, bp->bnapi[i]);
56f0fd80
VV
7345 }
7346
c0c050c5
MC
7347 irq->requested = 0;
7348 }
c0c050c5
MC
7349}
7350
7351static int bnxt_request_irq(struct bnxt *bp)
7352{
b81a90d3 7353 int i, j, rc = 0;
c0c050c5
MC
7354 unsigned long flags = 0;
7355#ifdef CONFIG_RFS_ACCEL
e5811b8c 7356 struct cpu_rmap *rmap;
c0c050c5
MC
7357#endif
7358
e5811b8c
MC
7359 rc = bnxt_setup_int_mode(bp);
7360 if (rc) {
7361 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
7362 rc);
7363 return rc;
7364 }
7365#ifdef CONFIG_RFS_ACCEL
7366 rmap = bp->dev->rx_cpu_rmap;
7367#endif
c0c050c5
MC
7368 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
7369 flags = IRQF_SHARED;
7370
b81a90d3 7371 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
e5811b8c
MC
7372 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7373 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
7374
c0c050c5 7375#ifdef CONFIG_RFS_ACCEL
b81a90d3 7376 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
7377 rc = irq_cpu_rmap_add(rmap, irq->vector);
7378 if (rc)
7379 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
b81a90d3
MC
7380 j);
7381 j++;
c0c050c5
MC
7382 }
7383#endif
7384 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
7385 bp->bnapi[i]);
7386 if (rc)
7387 break;
7388
7389 irq->requested = 1;
56f0fd80
VV
7390
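		/* Set an affinity hint that spreads rings across CPUs,
		 * preferring CPUs on the device's NUMA node
		 * (cpumask_local_spread() enumerates local-node CPUs first).
		 */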
7391 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
7392 int numa_node = dev_to_node(&bp->pdev->dev);
7393
7394 irq->have_cpumask = 1;
7395 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
7396 irq->cpu_mask);
7397 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
7398 if (rc) {
7399 netdev_warn(bp->dev,
7400 "Set affinity failed, IRQ = %d\n",
7401 irq->vector);
7402 break;
7403 }
7404 }
c0c050c5
MC
7405 }
7406 return rc;
7407}
7408
7409static void bnxt_del_napi(struct bnxt *bp)
7410{
7411 int i;
7412
7413 if (!bp->bnapi)
7414 return;
7415
7416 for (i = 0; i < bp->cp_nr_rings; i++) {
7417 struct bnxt_napi *bnapi = bp->bnapi[i];
7418
7419 napi_hash_del(&bnapi->napi);
7420 netif_napi_del(&bnapi->napi);
7421 }
e5f6f564
ED
7422 /* We called napi_hash_del() before netif_napi_del(), so we need
7423  * to respect an RCU grace period before freeing the napi structures.
7424 */
7425 synchronize_net();
c0c050c5
MC
7426}
7427
7428static void bnxt_init_napi(struct bnxt *bp)
7429{
7430 int i;
10bbdaf5 7431 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
7432 struct bnxt_napi *bnapi;
7433
7434 if (bp->flags & BNXT_FLAG_USING_MSIX) {
0fcec985
MC
7435 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
7436
7437 if (bp->flags & BNXT_FLAG_CHIP_P5)
7438 poll_fn = bnxt_poll_p5;
7439 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
10bbdaf5
PS
7440 cp_nr_rings--;
7441 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5 7442 bnapi = bp->bnapi[i];
0fcec985 7443 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
c0c050c5 7444 }
10bbdaf5
PS
7445 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7446 bnapi = bp->bnapi[cp_nr_rings];
7447 netif_napi_add(bp->dev, &bnapi->napi,
7448 bnxt_poll_nitroa0, 64);
10bbdaf5 7449 }
c0c050c5
MC
7450 } else {
7451 bnapi = bp->bnapi[0];
7452 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
c0c050c5
MC
7453 }
7454}
7455
7456static void bnxt_disable_napi(struct bnxt *bp)
7457{
7458 int i;
7459
7460 if (!bp->bnapi)
7461 return;
7462
0bc0b97f
AG
7463 for (i = 0; i < bp->cp_nr_rings; i++) {
7464 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
7465
7466 if (bp->bnapi[i]->rx_ring)
7467 cancel_work_sync(&cpr->dim.work);
7468
c0c050c5 7469 napi_disable(&bp->bnapi[i]->napi);
0bc0b97f 7470 }
c0c050c5
MC
7471}
7472
7473static void bnxt_enable_napi(struct bnxt *bp)
7474{
7475 int i;
7476
7477 for (i = 0; i < bp->cp_nr_rings; i++) {
6a8788f2 7478 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
fa7e2812 7479 bp->bnapi[i]->in_reset = false;
6a8788f2
AG
7480
7481 if (bp->bnapi[i]->rx_ring) {
7482 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
7483 cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
7484 }
c0c050c5
MC
7485 napi_enable(&bp->bnapi[i]->napi);
7486 }
7487}
7488
7df4ae9f 7489void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
7490{
7491 int i;
c0c050c5 7492 struct bnxt_tx_ring_info *txr;
c0c050c5 7493
b6ab4b01 7494 if (bp->tx_ring) {
c0c050c5 7495 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 7496 txr = &bp->tx_ring[i];
c0c050c5 7497 txr->dev_state = BNXT_DEV_STATE_CLOSING;
c0c050c5
MC
7498 }
7499 }
7500 /* Stop all TX queues */
7501 netif_tx_disable(bp->dev);
7502 netif_carrier_off(bp->dev);
7503}
7504
7df4ae9f 7505void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
7506{
7507 int i;
c0c050c5 7508 struct bnxt_tx_ring_info *txr;
c0c050c5
MC
7509
7510 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 7511 txr = &bp->tx_ring[i];
c0c050c5
MC
7512 txr->dev_state = 0;
7513 }
7514 netif_tx_wake_all_queues(bp->dev);
7515 if (bp->link_info.link_up)
7516 netif_carrier_on(bp->dev);
7517}
7518
7519static void bnxt_report_link(struct bnxt *bp)
7520{
7521 if (bp->link_info.link_up) {
7522 const char *duplex;
7523 const char *flow_ctrl;
38a21b34
DK
7524 u32 speed;
7525 u16 fec;
c0c050c5
MC
7526
7527 netif_carrier_on(bp->dev);
7528 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
7529 duplex = "full";
7530 else
7531 duplex = "half";
7532 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
7533 flow_ctrl = "ON - receive & transmit";
7534 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
7535 flow_ctrl = "ON - transmit";
7536 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
7537 flow_ctrl = "ON - receive";
7538 else
7539 flow_ctrl = "none";
7540 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
38a21b34 7541 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
c0c050c5 7542 speed, duplex, flow_ctrl);
170ce013
MC
7543 if (bp->flags & BNXT_FLAG_EEE_CAP)
7544 netdev_info(bp->dev, "EEE is %s\n",
7545 bp->eee.eee_active ? "active" :
7546 "not active");
e70c752f
MC
7547 fec = bp->link_info.fec_cfg;
7548 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
7549 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
7550 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
7551 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
7552 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
c0c050c5
MC
7553 } else {
7554 netif_carrier_off(bp->dev);
7555 netdev_err(bp->dev, "NIC Link is Down\n");
7556 }
7557}
7558
170ce013
MC
7559static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
7560{
7561 int rc = 0;
7562 struct hwrm_port_phy_qcaps_input req = {0};
7563 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
93ed8117 7564 struct bnxt_link_info *link_info = &bp->link_info;
170ce013
MC
7565
7566 if (bp->hwrm_spec_code < 0x10201)
7567 return 0;
7568
7569 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
7570
7571 mutex_lock(&bp->hwrm_cmd_lock);
7572 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7573 if (rc)
7574 goto hwrm_phy_qcaps_exit;
7575
acb20054 7576 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
170ce013
MC
7577 struct ethtool_eee *eee = &bp->eee;
7578 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
7579
7580 bp->flags |= BNXT_FLAG_EEE_CAP;
7581 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7582 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
7583 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
7584 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
7585 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
7586 }
55fd0cf3
MC
7587 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
7588 if (bp->test_info)
7589 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
7590 }
520ad89a
MC
7591 if (resp->supported_speeds_auto_mode)
7592 link_info->support_auto_speeds =
7593 le16_to_cpu(resp->supported_speeds_auto_mode);
170ce013 7594
d5430d31
MC
7595 bp->port_count = resp->port_cnt;
7596
170ce013
MC
7597hwrm_phy_qcaps_exit:
7598 mutex_unlock(&bp->hwrm_cmd_lock);
7599 return rc;
7600}
7601
c0c050c5
MC
7602static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
7603{
7604 int rc = 0;
7605 struct bnxt_link_info *link_info = &bp->link_info;
7606 struct hwrm_port_phy_qcfg_input req = {0};
7607 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7608 u8 link_up = link_info->link_up;
286ef9d6 7609 u16 diff;
c0c050c5
MC
7610
7611 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
7612
7613 mutex_lock(&bp->hwrm_cmd_lock);
7614 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7615 if (rc) {
7616 mutex_unlock(&bp->hwrm_cmd_lock);
7617 return rc;
7618 }
7619
7620 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
7621 link_info->phy_link_status = resp->link;
acb20054
MC
7622 link_info->duplex = resp->duplex_cfg;
7623 if (bp->hwrm_spec_code >= 0x10800)
7624 link_info->duplex = resp->duplex_state;
c0c050c5
MC
7625 link_info->pause = resp->pause;
7626 link_info->auto_mode = resp->auto_mode;
7627 link_info->auto_pause_setting = resp->auto_pause;
3277360e 7628 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 7629 link_info->force_pause_setting = resp->force_pause;
acb20054 7630 link_info->duplex_setting = resp->duplex_cfg;
c0c050c5
MC
7631 if (link_info->phy_link_status == BNXT_LINK_LINK)
7632 link_info->link_speed = le16_to_cpu(resp->link_speed);
7633 else
7634 link_info->link_speed = 0;
7635 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
c0c050c5
MC
7636 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
7637 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
3277360e
MC
7638 link_info->lp_auto_link_speeds =
7639 le16_to_cpu(resp->link_partner_adv_speeds);
c0c050c5
MC
7640 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
7641 link_info->phy_ver[0] = resp->phy_maj;
7642 link_info->phy_ver[1] = resp->phy_min;
7643 link_info->phy_ver[2] = resp->phy_bld;
7644 link_info->media_type = resp->media_type;
03efbec0 7645 link_info->phy_type = resp->phy_type;
11f15ed3 7646 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
7647 link_info->phy_addr = resp->eee_config_phy_addr &
7648 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 7649 link_info->module_status = resp->module_status;
170ce013
MC
7650
7651 if (bp->flags & BNXT_FLAG_EEE_CAP) {
7652 struct ethtool_eee *eee = &bp->eee;
7653 u16 fw_speeds;
7654
7655 eee->eee_active = 0;
7656 if (resp->eee_config_phy_addr &
7657 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
7658 eee->eee_active = 1;
7659 fw_speeds = le16_to_cpu(
7660 resp->link_partner_adv_eee_link_speed_mask);
7661 eee->lp_advertised =
7662 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7663 }
7664
7665 /* Pull initial EEE config */
7666 if (!chng_link_state) {
7667 if (resp->eee_config_phy_addr &
7668 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
7669 eee->eee_enabled = 1;
c0c050c5 7670
170ce013
MC
7671 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
7672 eee->advertised =
7673 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7674
7675 if (resp->eee_config_phy_addr &
7676 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
7677 __le32 tmr;
7678
7679 eee->tx_lpi_enabled = 1;
7680 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
7681 eee->tx_lpi_timer = le32_to_cpu(tmr) &
7682 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
7683 }
7684 }
7685 }
e70c752f
MC
7686
7687 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
7688 if (bp->hwrm_spec_code >= 0x10504)
7689 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
7690
c0c050c5
MC
7691 /* TODO: need to add more logic to report VF link */
7692 if (chng_link_state) {
7693 if (link_info->phy_link_status == BNXT_LINK_LINK)
7694 link_info->link_up = 1;
7695 else
7696 link_info->link_up = 0;
7697 if (link_up != link_info->link_up)
7698 bnxt_report_link(bp);
7699 } else {
7700 /* always link down if not required to update link state */
7701 link_info->link_up = 0;
7702 }
7703 mutex_unlock(&bp->hwrm_cmd_lock);
286ef9d6 7704
dac04907
MC
7705 if (!BNXT_SINGLE_PF(bp))
7706 return 0;
7707
286ef9d6
MC
7708 diff = link_info->support_auto_speeds ^ link_info->advertising;
7709 if ((link_info->support_auto_speeds | diff) !=
7710 link_info->support_auto_speeds) {
7711 /* An advertised speed is no longer supported, so we need to
0eaa24b9
MC
7712 * update the advertisement settings. Caller holds RTNL
7713 * so we can modify link settings.
286ef9d6 7714 */
286ef9d6 7715 link_info->advertising = link_info->support_auto_speeds;
0eaa24b9 7716 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
286ef9d6 7717 bnxt_hwrm_set_link_setting(bp, true, false);
286ef9d6 7718 }
c0c050c5
MC
7719 return 0;
7720}
7721
10289bec
MC
7722static void bnxt_get_port_module_status(struct bnxt *bp)
7723{
7724 struct bnxt_link_info *link_info = &bp->link_info;
7725 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
7726 u8 module_status;
7727
7728 if (bnxt_update_link(bp, true))
7729 return;
7730
7731 module_status = link_info->module_status;
7732 switch (module_status) {
7733 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
7734 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
7735 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
7736 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
7737 bp->pf.port_id);
7738 if (bp->hwrm_spec_code >= 0x10201) {
7739 netdev_warn(bp->dev, "Module part number %s\n",
7740 resp->phy_vendor_partnumber);
7741 }
7742 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
7743 netdev_warn(bp->dev, "TX is disabled\n");
7744 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
7745 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
7746 }
7747}
7748
c0c050c5
MC
7749static void
7750bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
7751{
7752 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
7753 if (bp->hwrm_spec_code >= 0x10201)
7754 req->auto_pause =
7755 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
7756 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7757 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
7758 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 7759 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
7760 req->enables |=
7761 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7762 } else {
7763 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7764 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
7765 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
7766 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
7767 req->enables |=
7768 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
7769 if (bp->hwrm_spec_code >= 0x10201) {
7770 req->auto_pause = req->force_pause;
7771 req->enables |= cpu_to_le32(
7772 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7773 }
c0c050c5
MC
7774 }
7775}
7776
7777static void bnxt_hwrm_set_link_common(struct bnxt *bp,
7778 struct hwrm_port_phy_cfg_input *req)
7779{
7780 u8 autoneg = bp->link_info.autoneg;
7781 u16 fw_link_speed = bp->link_info.req_link_speed;
68515a18 7782 u16 advertising = bp->link_info.advertising;
c0c050c5
MC
7783
7784 if (autoneg & BNXT_AUTONEG_SPEED) {
7785 req->auto_mode |=
11f15ed3 7786 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
c0c050c5
MC
7787
7788 req->enables |= cpu_to_le32(
7789 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
7790 req->auto_link_speed_mask = cpu_to_le16(advertising);
7791
7792 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
7793 req->flags |=
7794 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
7795 } else {
7796 req->force_link_speed = cpu_to_le16(fw_link_speed);
7797 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
7798 }
7799
c0c050c5
MC
7800 /* tell chimp that the setting takes effect immediately */
7801 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
7802}
7803
7804int bnxt_hwrm_set_pause(struct bnxt *bp)
7805{
7806 struct hwrm_port_phy_cfg_input req = {0};
7807 int rc;
7808
7809 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
7810 bnxt_hwrm_set_pause_common(bp, &req);
7811
7812 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
7813 bp->link_info.force_link_chng)
7814 bnxt_hwrm_set_link_common(bp, &req);
7815
7816 mutex_lock(&bp->hwrm_cmd_lock);
7817 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7818 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
7819 /* since changing the pause setting doesn't trigger any link
7820 * change event, the driver needs to update the current pause
7821 * result upon successful return of the phy_cfg command
7822 */
7823 bp->link_info.pause =
7824 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
7825 bp->link_info.auto_pause_setting = 0;
7826 if (!bp->link_info.force_link_chng)
7827 bnxt_report_link(bp);
7828 }
7829 bp->link_info.force_link_chng = false;
7830 mutex_unlock(&bp->hwrm_cmd_lock);
7831 return rc;
7832}
7833
939f7f0c
MC
7834static void bnxt_hwrm_set_eee(struct bnxt *bp,
7835 struct hwrm_port_phy_cfg_input *req)
7836{
7837 struct ethtool_eee *eee = &bp->eee;
7838
7839 if (eee->eee_enabled) {
7840 u16 eee_speeds;
7841 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
7842
7843 if (eee->tx_lpi_enabled)
7844 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
7845 else
7846 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
7847
7848 req->flags |= cpu_to_le32(flags);
7849 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
7850 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
7851 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
7852 } else {
7853 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
7854 }
7855}
7856
7857int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5
MC
7858{
7859 struct hwrm_port_phy_cfg_input req = {0};
7860
7861 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
7862 if (set_pause)
7863 bnxt_hwrm_set_pause_common(bp, &req);
7864
7865 bnxt_hwrm_set_link_common(bp, &req);
939f7f0c
MC
7866
7867 if (set_eee)
7868 bnxt_hwrm_set_eee(bp, &req);
c0c050c5
MC
7869 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7870}
7871
33f7d55f
MC
7872static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
7873{
7874 struct hwrm_port_phy_cfg_input req = {0};
7875
567b2abe 7876 if (!BNXT_SINGLE_PF(bp))
33f7d55f
MC
7877 return 0;
7878
7879 if (pci_num_vf(bp->pdev))
7880 return 0;
7881
7882 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
16d663a6 7883 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
33f7d55f
MC
7884 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7885}
7886
25e1acd6
MC
7887static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
7888{
7889 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
7890 struct hwrm_func_drv_if_change_input req = {0};
7891 bool resc_reinit = false;
7892 int rc;
7893
7894 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
7895 return 0;
7896
7897 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
7898 if (up)
7899 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
7900 mutex_lock(&bp->hwrm_cmd_lock);
7901 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7902 if (!rc && (resp->flags &
7903 cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
7904 resc_reinit = true;
7905 mutex_unlock(&bp->hwrm_cmd_lock);
7906
7907 if (up && resc_reinit && BNXT_NEW_RM(bp)) {
7908 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7909
7910 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7911 hw_resc->resv_cp_rings = 0;
780baad4 7912 hw_resc->resv_stat_ctxs = 0;
75720e63 7913 hw_resc->resv_irqs = 0;
25e1acd6
MC
7914 hw_resc->resv_tx_rings = 0;
7915 hw_resc->resv_rx_rings = 0;
7916 hw_resc->resv_hw_ring_grps = 0;
7917 hw_resc->resv_vnics = 0;
6b95c3e9
MC
7918 bp->tx_nr_rings = 0;
7919 bp->rx_nr_rings = 0;
25e1acd6
MC
7920 }
7921 return rc;
7922}
7923
5ad2cbee
MC
7924static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
7925{
7926 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7927 struct hwrm_port_led_qcaps_input req = {0};
7928 struct bnxt_pf_info *pf = &bp->pf;
7929 int rc;
7930
7931 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
7932 return 0;
7933
7934 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
7935 req.port_id = cpu_to_le16(pf->port_id);
7936 mutex_lock(&bp->hwrm_cmd_lock);
7937 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7938 if (rc) {
7939 mutex_unlock(&bp->hwrm_cmd_lock);
7940 return rc;
7941 }
7942 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
7943 int i;
7944
7945 bp->num_leds = resp->num_leds;
7946 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
7947 bp->num_leds);
7948 for (i = 0; i < bp->num_leds; i++) {
7949 struct bnxt_led_info *led = &bp->leds[i];
7950 __le16 caps = led->led_state_caps;
7951
7952 if (!led->led_group_id ||
7953 !BNXT_LED_ALT_BLINK_CAP(caps)) {
7954 bp->num_leds = 0;
7955 break;
7956 }
7957 }
7958 }
7959 mutex_unlock(&bp->hwrm_cmd_lock);
7960 return 0;
7961}
7962
5282db6c
MC
7963int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
7964{
7965 struct hwrm_wol_filter_alloc_input req = {0};
7966 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
7967 int rc;
7968
7969 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
7970 req.port_id = cpu_to_le16(bp->pf.port_id);
7971 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
7972 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
7973 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
7974 mutex_lock(&bp->hwrm_cmd_lock);
7975 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7976 if (!rc)
7977 bp->wol_filter_id = resp->wol_filter_id;
7978 mutex_unlock(&bp->hwrm_cmd_lock);
7979 return rc;
7980}
7981
7982int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
7983{
7984 struct hwrm_wol_filter_free_input req = {0};
7985 int rc;
7986
7987 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
7988 req.port_id = cpu_to_le16(bp->pf.port_id);
7989 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
7990 req.wol_filter_id = bp->wol_filter_id;
7991 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7992 return rc;
7993}
7994
c1ef146a
MC
7995static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
7996{
7997 struct hwrm_wol_filter_qcfg_input req = {0};
7998 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7999 u16 next_handle = 0;
8000 int rc;
8001
8002 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8003 req.port_id = cpu_to_le16(bp->pf.port_id);
8004 req.handle = cpu_to_le16(handle);
8005 mutex_lock(&bp->hwrm_cmd_lock);
8006 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8007 if (!rc) {
8008 next_handle = le16_to_cpu(resp->next_handle);
8009 if (next_handle != 0) {
8010 if (resp->wol_type ==
8011 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8012 bp->wol = 1;
8013 bp->wol_filter_id = resp->wol_filter_id;
8014 }
8015 }
8016 }
8017 mutex_unlock(&bp->hwrm_cmd_lock);
8018 return next_handle;
8019}
8020
8021static void bnxt_get_wol_settings(struct bnxt *bp)
8022{
8023 u16 handle = 0;
8024
8025 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8026 return;
8027
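	/* Walk the firmware's WOL filter list: each query returns the next
	 * filter handle, and a handle of 0 or 0xffff terminates the walk.
	 * Any magic-packet filter found along the way sets bp->wol in
	 * bnxt_hwrm_get_wol_fltrs().
	 */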
8028 do {
8029 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8030 } while (handle && handle != 0xffff);
8031}
8032
cde49a42
VV
8033#ifdef CONFIG_BNXT_HWMON
8034static ssize_t bnxt_show_temp(struct device *dev,
8035 struct device_attribute *devattr, char *buf)
8036{
8037 struct hwrm_temp_monitor_query_input req = {0};
8038 struct hwrm_temp_monitor_query_output *resp;
8039 struct bnxt *bp = dev_get_drvdata(dev);
8040 u32 temp = 0;
8041
8042 resp = bp->hwrm_cmd_resp_addr;
8043 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8044 mutex_lock(&bp->hwrm_cmd_lock);
8045 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8046 temp = resp->temp * 1000; /* display millidegree */
8047 mutex_unlock(&bp->hwrm_cmd_lock);
8048
8049 return sprintf(buf, "%u\n", temp);
8050}
8051static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8052
8053static struct attribute *bnxt_attrs[] = {
8054 &sensor_dev_attr_temp1_input.dev_attr.attr,
8055 NULL
8056};
8057ATTRIBUTE_GROUPS(bnxt);
8058
8059static void bnxt_hwmon_close(struct bnxt *bp)
8060{
8061 if (bp->hwmon_dev) {
8062 hwmon_device_unregister(bp->hwmon_dev);
8063 bp->hwmon_dev = NULL;
8064 }
8065}
8066
8067static void bnxt_hwmon_open(struct bnxt *bp)
8068{
8069 struct pci_dev *pdev = bp->pdev;
8070
8071 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8072 DRV_MODULE_NAME, bp,
8073 bnxt_groups);
8074 if (IS_ERR(bp->hwmon_dev)) {
8075 bp->hwmon_dev = NULL;
8076 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8077 }
8078}
8079#else
8080static void bnxt_hwmon_close(struct bnxt *bp)
8081{
8082}
8083
8084static void bnxt_hwmon_open(struct bnxt *bp)
8085{
8086}
8087#endif
8088
939f7f0c
MC
8089static bool bnxt_eee_config_ok(struct bnxt *bp)
8090{
8091 struct ethtool_eee *eee = &bp->eee;
8092 struct bnxt_link_info *link_info = &bp->link_info;
8093
8094 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
8095 return true;
8096
8097 if (eee->eee_enabled) {
8098 u32 advertising =
8099 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
8100
8101 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8102 eee->eee_enabled = 0;
8103 return false;
8104 }
8105 if (eee->advertised & ~advertising) {
8106 eee->advertised = advertising & eee->supported;
8107 return false;
8108 }
8109 }
8110 return true;
8111}
8112
c0c050c5
MC
8113static int bnxt_update_phy_setting(struct bnxt *bp)
8114{
8115 int rc;
8116 bool update_link = false;
8117 bool update_pause = false;
939f7f0c 8118 bool update_eee = false;
c0c050c5
MC
8119 struct bnxt_link_info *link_info = &bp->link_info;
8120
8121 rc = bnxt_update_link(bp, true);
8122 if (rc) {
8123 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
8124 rc);
8125 return rc;
8126 }
33dac24a
MC
8127 if (!BNXT_SINGLE_PF(bp))
8128 return 0;
8129
c0c050c5 8130 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
c9ee9516
MC
8131 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
8132 link_info->req_flow_ctrl)
c0c050c5
MC
8133 update_pause = true;
8134 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8135 link_info->force_pause_setting != link_info->req_flow_ctrl)
8136 update_pause = true;
c0c050c5
MC
8137 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8138 if (BNXT_AUTO_MODE(link_info->auto_mode))
8139 update_link = true;
8140 if (link_info->req_link_speed != link_info->force_link_speed)
8141 update_link = true;
de73018f
MC
8142 if (link_info->req_duplex != link_info->duplex_setting)
8143 update_link = true;
c0c050c5
MC
8144 } else {
8145 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
8146 update_link = true;
8147 if (link_info->advertising != link_info->auto_link_speeds)
8148 update_link = true;
c0c050c5
MC
8149 }
8150
16d663a6
MC
8151 /* The last close may have shut down the link, so we need to call
8152 * PHY_CFG to bring it back up.
8153 */
8154 if (!netif_carrier_ok(bp->dev))
8155 update_link = true;
8156
939f7f0c
MC
8157 if (!bnxt_eee_config_ok(bp))
8158 update_eee = true;
8159
c0c050c5 8160 if (update_link)
939f7f0c 8161 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
c0c050c5
MC
8162 else if (update_pause)
8163 rc = bnxt_hwrm_set_pause(bp);
8164 if (rc) {
8165 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
8166 rc);
8167 return rc;
8168 }
8169
8170 return rc;
8171}
8172
11809490
JH
8173/* Common routine to pre-map certain register blocks to different GRC windows.
8174 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
8175 * in the PF and 3 windows in the VF can be customized to map different
8176 * register blocks.
8177 */
8178static void bnxt_preset_reg_win(struct bnxt *bp)
8179{
8180 if (BNXT_PF(bp)) {
8181 /* CAG registers map to GRC window #4 */
8182 writel(BNXT_CAG_REG_BASE,
8183 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
8184 }
8185}
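/* A hedged note on the "+ 12" above: the window-select registers appear
 * to be 4 bytes each starting at BNXT_GRCPF_REG_WINDOW_BASE_OUT, i.e.
 * window #N is selected through the register at offset (N - 1) * 4, so
 * window #4 uses offset 12.
 */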
8186
47558acd
MC
8187static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
8188
c0c050c5
MC
8189static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8190{
8191 int rc = 0;
8192
11809490 8193 bnxt_preset_reg_win(bp);
c0c050c5
MC
8194 netif_carrier_off(bp->dev);
8195 if (irq_re_init) {
47558acd
MC
8196 /* Reserve rings now if none were reserved at driver probe. */
8197 rc = bnxt_init_dflt_ring_mode(bp);
8198 if (rc) {
8199 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
8200 return rc;
8201 }
c0c050c5 8202 }
41e8d798
MC
8203 rc = bnxt_reserve_rings(bp);
8204 if (rc)
8205 return rc;
c0c050c5
MC
8206 if ((bp->flags & BNXT_FLAG_RFS) &&
8207 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
8208 /* disable RFS if falling back to INTA */
8209 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
8210 bp->flags &= ~BNXT_FLAG_RFS;
8211 }
8212
8213 rc = bnxt_alloc_mem(bp, irq_re_init);
8214 if (rc) {
8215 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8216 goto open_err_free_mem;
8217 }
8218
8219 if (irq_re_init) {
8220 bnxt_init_napi(bp);
8221 rc = bnxt_request_irq(bp);
8222 if (rc) {
8223 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
c58387ab 8224 goto open_err_irq;
c0c050c5
MC
8225 }
8226 }
8227
8228 bnxt_enable_napi(bp);
cabfb09d 8229 bnxt_debug_dev_init(bp);
c0c050c5
MC
8230
8231 rc = bnxt_init_nic(bp, irq_re_init);
8232 if (rc) {
8233 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8234 goto open_err;
8235 }
8236
8237 if (link_re_init) {
e2dc9b6e 8238 mutex_lock(&bp->link_lock);
c0c050c5 8239 rc = bnxt_update_phy_setting(bp);
e2dc9b6e 8240 mutex_unlock(&bp->link_lock);
a1ef4a79 8241 if (rc) {
ba41d46f 8242 netdev_warn(bp->dev, "failed to update phy settings\n");
a1ef4a79
MC
8243 if (BNXT_SINGLE_PF(bp)) {
8244 bp->link_info.phy_retry = true;
8245 bp->link_info.phy_retry_expires =
8246 jiffies + 5 * HZ;
8247 }
8248 }
c0c050c5
MC
8249 }
8250
7cdd5fc3 8251 if (irq_re_init)
ad51b8e9 8252 udp_tunnel_get_rx_info(bp->dev);
c0c050c5 8253
caefe526 8254 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
8255 bnxt_enable_int(bp);
8256 /* Enable TX queues */
8257 bnxt_tx_enable(bp);
8258 mod_timer(&bp->timer, jiffies + bp->current_interval);
10289bec
MC
8259 /* Poll link status and check for SFP+ module status */
8260 bnxt_get_port_module_status(bp);
c0c050c5 8261
ee5c7fb3
SP
8262 /* VF-reps may need to be re-opened after the PF is re-opened */
8263 if (BNXT_PF(bp))
8264 bnxt_vf_reps_open(bp);
c0c050c5
MC
8265 return 0;
8266
8267open_err:
cabfb09d 8268 bnxt_debug_dev_exit(bp);
c0c050c5 8269 bnxt_disable_napi(bp);
c58387ab
VG
8270
8271open_err_irq:
c0c050c5
MC
8272 bnxt_del_napi(bp);
8273
8274open_err_free_mem:
8275 bnxt_free_skbs(bp);
8276 bnxt_free_irq(bp);
8277 bnxt_free_mem(bp, true);
8278 return rc;
8279}
8280
8281/* rtnl_lock held */
8282int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8283{
8284 int rc = 0;
8285
8286 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
8287 if (rc) {
8288 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
8289 dev_close(bp->dev);
8290 }
8291 return rc;
8292}
8293
f7dc1ea6
MC
8294/* rtnl_lock held, open the NIC halfway by allocating all resources, but
8295 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
8296 * self tests.
8297 */
8298int bnxt_half_open_nic(struct bnxt *bp)
8299{
8300 int rc = 0;
8301
8302 rc = bnxt_alloc_mem(bp, false);
8303 if (rc) {
8304 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8305 goto half_open_err;
8306 }
8307 rc = bnxt_init_nic(bp, false);
8308 if (rc) {
8309 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8310 goto half_open_err;
8311 }
8312 return 0;
8313
8314half_open_err:
8315 bnxt_free_skbs(bp);
8316 bnxt_free_mem(bp, false);
8317 dev_close(bp->dev);
8318 return rc;
8319}
8320
8321/* rtnl_lock held, this call can only be made after a previous successful
8322 * call to bnxt_half_open_nic().
8323 */
8324void bnxt_half_close_nic(struct bnxt *bp)
8325{
8326 bnxt_hwrm_resource_free(bp, false, false);
8327 bnxt_free_skbs(bp);
8328 bnxt_free_mem(bp, false);
8329}
8330
c0c050c5
MC
8331static int bnxt_open(struct net_device *dev)
8332{
8333 struct bnxt *bp = netdev_priv(dev);
25e1acd6 8334 int rc;
c0c050c5 8335
25e1acd6
MC
8336 bnxt_hwrm_if_change(bp, true);
8337 rc = __bnxt_open_nic(bp, true, true);
8338 if (rc)
8339 bnxt_hwrm_if_change(bp, false);
cde49a42
VV
8340
8341 bnxt_hwmon_open(bp);
8342
25e1acd6 8343 return rc;
c0c050c5
MC
8344}
8345
f9b76ebd
MC
8346static bool bnxt_drv_busy(struct bnxt *bp)
8347{
8348 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
8349 test_bit(BNXT_STATE_READ_STATS, &bp->state));
8350}
8351
b8875ca3
MC
8352static void bnxt_get_ring_stats(struct bnxt *bp,
8353 struct rtnl_link_stats64 *stats);
8354
86e953db
MC
8355static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
8356 bool link_re_init)
c0c050c5 8357{
ee5c7fb3
SP
8358 /* Close the VF-reps before closing PF */
8359 if (BNXT_PF(bp))
8360 bnxt_vf_reps_close(bp);
86e953db 8361
c0c050c5
MC
8362 /* Change device state to avoid TX queue wake up's */
8363 bnxt_tx_disable(bp);
8364
caefe526 8365 clear_bit(BNXT_STATE_OPEN, &bp->state);
4cebdcec 8366 smp_mb__after_atomic();
f9b76ebd 8367 while (bnxt_drv_busy(bp))
4cebdcec 8368 msleep(20);
c0c050c5 8369
9d8bc097 8370 /* Flush rings and disable interrupts */
c0c050c5
MC
8371 bnxt_shutdown_nic(bp, irq_re_init);
8372
8373 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
8374
cabfb09d 8375 bnxt_debug_dev_exit(bp);
c0c050c5 8376 bnxt_disable_napi(bp);
c0c050c5
MC
8377 del_timer_sync(&bp->timer);
8378 bnxt_free_skbs(bp);
8379
b8875ca3
MC
8380 /* Save ring stats before shutdown */
8381 if (bp->bnapi)
8382 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
c0c050c5
MC
8383 if (irq_re_init) {
8384 bnxt_free_irq(bp);
8385 bnxt_del_napi(bp);
8386 }
8387 bnxt_free_mem(bp, irq_re_init);
86e953db
MC
8388}
8389
8390int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8391{
8392 int rc = 0;
8393
8394#ifdef CONFIG_BNXT_SRIOV
8395 if (bp->sriov_cfg) {
8396 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
8397 !bp->sriov_cfg,
8398 BNXT_SRIOV_CFG_WAIT_TMO);
8399 if (rc)
8400 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
8401 }
8402#endif
8403 __bnxt_close_nic(bp, irq_re_init, link_re_init);
c0c050c5
MC
8404 return rc;
8405}
8406
8407static int bnxt_close(struct net_device *dev)
8408{
8409 struct bnxt *bp = netdev_priv(dev);
8410
cde49a42 8411 bnxt_hwmon_close(bp);
c0c050c5 8412 bnxt_close_nic(bp, true, true);
33f7d55f 8413 bnxt_hwrm_shutdown_link(bp);
25e1acd6 8414 bnxt_hwrm_if_change(bp, false);
c0c050c5
MC
8415 return 0;
8416}
8417
8418/* rtnl_lock held */
8419static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8420{
8421 switch (cmd) {
8422 case SIOCGMIIPHY:
8423 /* fallthru */
8424 case SIOCGMIIREG: {
8425 if (!netif_running(dev))
8426 return -EAGAIN;
8427
8428 return 0;
8429 }
8430
8431 case SIOCSMIIREG:
8432 if (!netif_running(dev))
8433 return -EAGAIN;
8434
8435 return 0;
8436
8437 default:
8438 /* do nothing */
8439 break;
8440 }
8441 return -EOPNOTSUPP;
8442}
8443
b8875ca3
MC
8444static void bnxt_get_ring_stats(struct bnxt *bp,
8445 struct rtnl_link_stats64 *stats)
c0c050c5 8446{
b8875ca3 8447 int i;
c0c050c5 8448
c0c050c5 8449
c0c050c5
MC
8450 for (i = 0; i < bp->cp_nr_rings; i++) {
8451 struct bnxt_napi *bnapi = bp->bnapi[i];
8452 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8453 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
8454
8455 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
8456 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
8457 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
8458
8459 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
8460 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
8461 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
8462
8463 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
8464 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
8465 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
8466
8467 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
8468 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
8469 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
8470
8471 stats->rx_missed_errors +=
8472 le64_to_cpu(hw_stats->rx_discard_pkts);
8473
8474 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
8475
c0c050c5
MC
8476 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
8477 }
b8875ca3
MC
8478}
8479
8480static void bnxt_add_prev_stats(struct bnxt *bp,
8481 struct rtnl_link_stats64 *stats)
8482{
8483 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
8484
8485 stats->rx_packets += prev_stats->rx_packets;
8486 stats->tx_packets += prev_stats->tx_packets;
8487 stats->rx_bytes += prev_stats->rx_bytes;
8488 stats->tx_bytes += prev_stats->tx_bytes;
8489 stats->rx_missed_errors += prev_stats->rx_missed_errors;
8490 stats->multicast += prev_stats->multicast;
8491 stats->tx_dropped += prev_stats->tx_dropped;
8492}
8493
8494static void
8495bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
8496{
8497 struct bnxt *bp = netdev_priv(dev);
8498
8499 set_bit(BNXT_STATE_READ_STATS, &bp->state);
8500 /* Make sure bnxt_close_nic() sees that we are reading stats before
8501 * we check the BNXT_STATE_OPEN flag.
8502 */
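	/* (This barrier appears to pair with the smp_mb__after_atomic() in
	 * __bnxt_close_nic(), which runs after BNXT_STATE_OPEN is cleared
	 * and before bnxt_drv_busy() is polled.)
	 */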
8503 smp_mb__after_atomic();
8504 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
8505 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
8506 *stats = bp->net_stats_prev;
8507 return;
8508 }
8509
8510 bnxt_get_ring_stats(bp, stats);
8511 bnxt_add_prev_stats(bp, stats);
c0c050c5 8512
9947f83f
MC
8513 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8514 struct rx_port_stats *rx = bp->hw_rx_port_stats;
8515 struct tx_port_stats *tx = bp->hw_tx_port_stats;
8516
8517 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
8518 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
8519 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
8520 le64_to_cpu(rx->rx_ovrsz_frames) +
8521 le64_to_cpu(rx->rx_runt_frames);
8522 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
8523 le64_to_cpu(rx->rx_jbr_frames);
8524 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
8525 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
8526 stats->tx_errors = le64_to_cpu(tx->tx_err);
8527 }
f9b76ebd 8528 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
c0c050c5
MC
8529}
8530
8531static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
8532{
8533 struct net_device *dev = bp->dev;
8534 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8535 struct netdev_hw_addr *ha;
8536 u8 *haddr;
8537 int mc_count = 0;
8538 bool update = false;
8539 int off = 0;
8540
8541 netdev_for_each_mc_addr(ha, dev) {
8542 if (mc_count >= BNXT_MAX_MC_ADDRS) {
8543 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8544 vnic->mc_list_count = 0;
8545 return false;
8546 }
8547 haddr = ha->addr;
8548 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
8549 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
8550 update = true;
8551 }
8552 off += ETH_ALEN;
8553 mc_count++;
8554 }
8555 if (mc_count)
8556 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
8557
8558 if (mc_count != vnic->mc_list_count) {
8559 vnic->mc_list_count = mc_count;
8560 update = true;
8561 }
8562 return update;
8563}
8564
8565static bool bnxt_uc_list_updated(struct bnxt *bp)
8566{
8567 struct net_device *dev = bp->dev;
8568 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8569 struct netdev_hw_addr *ha;
8570 int off = 0;
8571
8572 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
8573 return true;
8574
8575 netdev_for_each_uc_addr(ha, dev) {
8576 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
8577 return true;
8578
8579 off += ETH_ALEN;
8580 }
8581 return false;
8582}
8583
8584static void bnxt_set_rx_mode(struct net_device *dev)
8585{
8586 struct bnxt *bp = netdev_priv(dev);
8587 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8588 u32 mask = vnic->rx_mask;
8589 bool mc_update = false;
8590 bool uc_update;
8591
8592 if (!netif_running(dev))
8593 return;
8594
8595 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
8596 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
30e33848
MC
8597 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
8598 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
c0c050c5 8599
17c71ac3 8600 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
c0c050c5
MC
8601 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8602
8603 uc_update = bnxt_uc_list_updated(bp);
8604
30e33848
MC
8605 if (dev->flags & IFF_BROADCAST)
8606 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5
MC
8607 if (dev->flags & IFF_ALLMULTI) {
8608 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8609 vnic->mc_list_count = 0;
8610 } else {
8611 mc_update = bnxt_mc_list_updated(bp, &mask);
8612 }
8613
8614 if (mask != vnic->rx_mask || uc_update || mc_update) {
8615 vnic->rx_mask = mask;
8616
8617 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
c213eae8 8618 bnxt_queue_sp_work(bp);
c0c050c5
MC
8619 }
8620}
8621
b664f008 8622static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
8623{
8624 struct net_device *dev = bp->dev;
8625 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8626 struct netdev_hw_addr *ha;
8627 int i, off = 0, rc;
8628 bool uc_update;
8629
8630 netif_addr_lock_bh(dev);
8631 uc_update = bnxt_uc_list_updated(bp);
8632 netif_addr_unlock_bh(dev);
8633
8634 if (!uc_update)
8635 goto skip_uc;
8636
8637 mutex_lock(&bp->hwrm_cmd_lock);
8638 for (i = 1; i < vnic->uc_filter_count; i++) {
8639 struct hwrm_cfa_l2_filter_free_input req = {0};
8640
8641 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
8642 -1);
8643
8644 req.l2_filter_id = vnic->fw_l2_filter_id[i];
8645
8646 rc = _hwrm_send_message(bp, &req, sizeof(req),
8647 HWRM_CMD_TIMEOUT);
8648 }
8649 mutex_unlock(&bp->hwrm_cmd_lock);
8650
8651 vnic->uc_filter_count = 1;
8652
8653 netif_addr_lock_bh(dev);
8654 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
8655 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8656 } else {
8657 netdev_for_each_uc_addr(ha, dev) {
8658 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
8659 off += ETH_ALEN;
8660 vnic->uc_filter_count++;
8661 }
8662 }
8663 netif_addr_unlock_bh(dev);
8664
8665 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
8666 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
8667 if (rc) {
8668 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
8669 rc);
8670 vnic->uc_filter_count = i;
b664f008 8671 return rc;
c0c050c5
MC
8672 }
8673 }
8674
8675skip_uc:
8676 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
8677 if (rc)
8678 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
8679 rc);
b664f008
MC
8680
8681 return rc;
c0c050c5
MC
8682}
8683
2773dfb2
MC
8684static bool bnxt_can_reserve_rings(struct bnxt *bp)
8685{
8686#ifdef CONFIG_BNXT_SRIOV
f1ca94de 8687 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
2773dfb2
MC
8688 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8689
8690 /* No minimum rings were provisioned by the PF. Don't
8691 * reserve rings by default when device is down.
8692 */
8693 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
8694 return true;
8695
8696 if (!netif_running(bp->dev))
8697 return false;
8698 }
8699#endif
8700 return true;
8701}
8702
8079e8f1
MC
8703/* If the chip and firmware support RFS */
8704static bool bnxt_rfs_supported(struct bnxt *bp)
8705{
41e8d798
MC
8706 if (bp->flags & BNXT_FLAG_CHIP_P5)
8707 return false;
8079e8f1
MC
8708 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
8709 return true;
ae10ae74
MC
8710 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8711 return true;
8079e8f1
MC
8712 return false;
8713}
8714
8715/* If runtime conditions support RFS */
2bcfa6f6
MC
8716static bool bnxt_rfs_capable(struct bnxt *bp)
8717{
8718#ifdef CONFIG_RFS_ACCEL
8079e8f1 8719 int vnics, max_vnics, max_rss_ctxs;
2bcfa6f6 8720
41e8d798
MC
8721 if (bp->flags & BNXT_FLAG_CHIP_P5)
8722 return false;
2773dfb2 8723 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
2bcfa6f6
MC
8724 return false;
8725
8726 vnics = 1 + bp->rx_nr_rings;
8079e8f1
MC
8727 max_vnics = bnxt_get_max_func_vnics(bp);
8728 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
ae10ae74
MC
8729
8730 /* RSS contexts not a limiting factor */
8731 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8732 max_rss_ctxs = max_vnics;
8079e8f1 8733 if (vnics > max_vnics || vnics > max_rss_ctxs) {
6a1eef5b
MC
8734 if (bp->rx_nr_rings > 1)
8735 netdev_warn(bp->dev,
8736 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
8737 min(max_rss_ctxs - 1, max_vnics - 1));
2bcfa6f6 8738 return false;
a2304909 8739 }
2bcfa6f6 8740
f1ca94de 8741 if (!BNXT_NEW_RM(bp))
6a1eef5b
MC
8742 return true;
8743
8744 if (vnics == bp->hw_resc.resv_vnics)
8745 return true;
8746
780baad4 8747 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
6a1eef5b
MC
8748 if (vnics <= bp->hw_resc.resv_vnics)
8749 return true;
8750
8751 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
780baad4 8752 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
6a1eef5b 8753 return false;
2bcfa6f6
MC
8754#else
8755 return false;
8756#endif
8757}
8758
c0c050c5
MC
8759static netdev_features_t bnxt_fix_features(struct net_device *dev,
8760 netdev_features_t features)
8761{
2bcfa6f6
MC
8762 struct bnxt *bp = netdev_priv(dev);
8763
a2304909 8764 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
2bcfa6f6 8765 features &= ~NETIF_F_NTUPLE;
5a9f6b23 8766
1054aee8
MC
8767 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
8768 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
8769
8770 if (!(features & NETIF_F_GRO))
8771 features &= ~NETIF_F_GRO_HW;
8772
8773 if (features & NETIF_F_GRO_HW)
8774 features &= ~NETIF_F_LRO;
8775
5a9f6b23
MC
8776 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
8777 * turned on or off together.
8778 */
8779 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
8780 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
8781 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
8782 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
8783 NETIF_F_HW_VLAN_STAG_RX);
8784 else
8785 features |= NETIF_F_HW_VLAN_CTAG_RX |
8786 NETIF_F_HW_VLAN_STAG_RX;
8787 }
cf6645f8
MC
8788#ifdef CONFIG_BNXT_SRIOV
8789 if (BNXT_VF(bp)) {
8790 if (bp->vf.vlan) {
8791 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
8792 NETIF_F_HW_VLAN_STAG_RX);
8793 }
8794 }
8795#endif
c0c050c5
MC
8796 return features;
8797}
8798
8799static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
8800{
8801 struct bnxt *bp = netdev_priv(dev);
8802 u32 flags = bp->flags;
8803 u32 changes;
8804 int rc = 0;
8805 bool re_init = false;
8806 bool update_tpa = false;
8807
8808 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
1054aee8 8809 if (features & NETIF_F_GRO_HW)
c0c050c5 8810 flags |= BNXT_FLAG_GRO;
1054aee8 8811 else if (features & NETIF_F_LRO)
c0c050c5
MC
8812 flags |= BNXT_FLAG_LRO;
8813
bdbd1eb5
MC
8814 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
8815 flags &= ~BNXT_FLAG_TPA;
8816
c0c050c5
MC
8817 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8818 flags |= BNXT_FLAG_STRIP_VLAN;
8819
8820 if (features & NETIF_F_NTUPLE)
8821 flags |= BNXT_FLAG_RFS;
8822
8823 changes = flags ^ bp->flags;
8824 if (changes & BNXT_FLAG_TPA) {
8825 update_tpa = true;
8826 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
8827 (flags & BNXT_FLAG_TPA) == 0)
8828 re_init = true;
8829 }
8830
8831 if (changes & ~BNXT_FLAG_TPA)
8832 re_init = true;
8833
8834 if (flags != bp->flags) {
8835 u32 old_flags = bp->flags;
8836
8837 bp->flags = flags;
8838
2bcfa6f6 8839 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
c0c050c5
MC
8840 if (update_tpa)
8841 bnxt_set_ring_params(bp);
8842 return rc;
8843 }
8844
8845 if (re_init) {
8846 bnxt_close_nic(bp, false, false);
8847 if (update_tpa)
8848 bnxt_set_ring_params(bp);
8849
8850 return bnxt_open_nic(bp, false, false);
8851 }
8852 if (update_tpa) {
8853 rc = bnxt_set_tpa(bp,
8854 (flags & BNXT_FLAG_TPA) ?
8855 true : false);
8856 if (rc)
8857 bp->flags = old_flags;
8858 }
8859 }
8860 return rc;
8861}
8862
ffd77621
MC
8863static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
8864 u32 ring_id, u32 *prod, u32 *cons)
8865{
8866 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
8867 struct hwrm_dbg_ring_info_get_input req = {0};
8868 int rc;
8869
8870 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
8871 req.ring_type = ring_type;
8872 req.fw_ring_id = cpu_to_le32(ring_id);
8873 mutex_lock(&bp->hwrm_cmd_lock);
8874 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8875 if (!rc) {
8876 *prod = le32_to_cpu(resp->producer_index);
8877 *cons = le32_to_cpu(resp->consumer_index);
8878 }
8879 mutex_unlock(&bp->hwrm_cmd_lock);
8880 return rc;
8881}
8882
9f554590
MC
8883static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
8884{
b6ab4b01 8885 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9f554590
MC
8886 int i = bnapi->index;
8887
3b2b7d9d
MC
8888 if (!txr)
8889 return;
8890
9f554590
MC
8891 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
8892 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
8893 txr->tx_cons);
8894}
8895
8896static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
8897{
b6ab4b01 8898 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9f554590
MC
8899 int i = bnapi->index;
8900
3b2b7d9d
MC
8901 if (!rxr)
8902 return;
8903
9f554590
MC
8904 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
8905 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
8906 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
8907 rxr->rx_sw_agg_prod);
8908}
8909
8910static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
8911{
8912 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8913 int i = bnapi->index;
8914
8915 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
8916 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
8917}
8918
c0c050c5
MC
8919static void bnxt_dbg_dump_states(struct bnxt *bp)
8920{
8921 int i;
8922 struct bnxt_napi *bnapi;
c0c050c5
MC
8923
8924 for (i = 0; i < bp->cp_nr_rings; i++) {
8925 bnapi = bp->bnapi[i];
c0c050c5 8926 if (netif_msg_drv(bp)) {
9f554590
MC
8927 bnxt_dump_tx_sw_state(bnapi);
8928 bnxt_dump_rx_sw_state(bnapi);
8929 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
8930 }
8931 }
8932}
8933
6988bd92 8934static void bnxt_reset_task(struct bnxt *bp, bool silent)
c0c050c5 8935{
6988bd92
MC
8936 if (!silent)
8937 bnxt_dbg_dump_states(bp);
028de140 8938 if (netif_running(bp->dev)) {
b386cd36
MC
8939 int rc;
8940
8941 if (!silent)
8942 bnxt_ulp_stop(bp);
028de140 8943 bnxt_close_nic(bp, false, false);
b386cd36
MC
8944 rc = bnxt_open_nic(bp, false, false);
8945 if (!silent && !rc)
8946 bnxt_ulp_start(bp);
028de140 8947 }
c0c050c5
MC
8948}
8949
8950static void bnxt_tx_timeout(struct net_device *dev)
8951{
8952 struct bnxt *bp = netdev_priv(dev);
8953
8954 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
8955 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
c213eae8 8956 bnxt_queue_sp_work(bp);
c0c050c5
MC
8957}
8958
e99e88a9 8959static void bnxt_timer(struct timer_list *t)
c0c050c5 8960{
e99e88a9 8961 struct bnxt *bp = from_timer(bp, t, timer);
c0c050c5
MC
8962 struct net_device *dev = bp->dev;
8963
8964 if (!netif_running(dev))
8965 return;
8966
8967 if (atomic_read(&bp->intr_sem) != 0)
8968 goto bnxt_restart_timer;
8969
adcc331e
MC
8970 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
8971 bp->stats_coal_ticks) {
3bdf56c4 8972 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
c213eae8 8973 bnxt_queue_sp_work(bp);
3bdf56c4 8974 }
5a84acbe
SP
8975
8976 if (bnxt_tc_flower_enabled(bp)) {
8977 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
8978 bnxt_queue_sp_work(bp);
8979 }
a1ef4a79
MC
8980
8981 if (bp->link_info.phy_retry) {
8982 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
8983 bp->link_info.phy_retry = 0;
8984 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
8985 } else {
8986 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
8987 bnxt_queue_sp_work(bp);
8988 }
8989 }
ffd77621
MC
8990
8991 if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
8992 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
8993 bnxt_queue_sp_work(bp);
8994 }
c0c050c5
MC
8995bnxt_restart_timer:
8996 mod_timer(&bp->timer, jiffies + bp->current_interval);
8997}
8998
a551ee94 8999static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6988bd92 9000{
a551ee94
MC
9001 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
9002 * set. If the device is being closed, bnxt_close() may be holding
6988bd92
MC
 9003	 * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
 9004	 * must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
9005 */
9006 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9007 rtnl_lock();
a551ee94
MC
9008}
9009
9010static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
9011{
6988bd92
MC
9012 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9013 rtnl_unlock();
9014}
9015
a551ee94
MC
9016/* Only called from bnxt_sp_task() */
9017static void bnxt_reset(struct bnxt *bp, bool silent)
9018{
9019 bnxt_rtnl_lock_sp(bp);
9020 if (test_bit(BNXT_STATE_OPEN, &bp->state))
9021 bnxt_reset_task(bp, silent);
9022 bnxt_rtnl_unlock_sp(bp);
9023}
9024
ffd77621
MC
9025static void bnxt_chk_missed_irq(struct bnxt *bp)
9026{
9027 int i;
9028
9029 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9030 return;
9031
9032 for (i = 0; i < bp->cp_nr_rings; i++) {
9033 struct bnxt_napi *bnapi = bp->bnapi[i];
9034 struct bnxt_cp_ring_info *cpr;
9035 u32 fw_ring_id;
9036 int j;
9037
9038 if (!bnapi)
9039 continue;
9040
9041 cpr = &bnapi->cp_ring;
9042 for (j = 0; j < 2; j++) {
9043 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
9044 u32 val[2];
9045
9046 if (!cpr2 || cpr2->has_more_work ||
9047 !bnxt_has_work(bp, cpr2))
9048 continue;
9049
9050 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
9051 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
9052 continue;
9053 }
9054 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
9055 bnxt_dbg_hwrm_ring_info_get(bp,
9056 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
9057 fw_ring_id, &val[0], &val[1]);
83eb5c5c 9058 cpr->missed_irqs++;
ffd77621
MC
9059 }
9060 }
9061}
9062
c0c050c5
MC
9063static void bnxt_cfg_ntp_filters(struct bnxt *);
9064
9065static void bnxt_sp_task(struct work_struct *work)
9066{
9067 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
c0c050c5 9068
4cebdcec
MC
9069 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9070 smp_mb__after_atomic();
9071 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9072 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5 9073 return;
4cebdcec 9074 }
c0c050c5
MC
9075
9076 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
9077 bnxt_cfg_rx_mode(bp);
9078
9079 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
9080 bnxt_cfg_ntp_filters(bp);
c0c050c5
MC
9081 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
9082 bnxt_hwrm_exec_fwd_req(bp);
9083 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9084 bnxt_hwrm_tunnel_dst_port_alloc(
9085 bp, bp->vxlan_port,
9086 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9087 }
9088 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9089 bnxt_hwrm_tunnel_dst_port_free(
9090 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9091 }
7cdd5fc3
AD
9092 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9093 bnxt_hwrm_tunnel_dst_port_alloc(
9094 bp, bp->nge_port,
9095 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9096 }
9097 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9098 bnxt_hwrm_tunnel_dst_port_free(
9099 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9100 }
00db3cba 9101 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
3bdf56c4 9102 bnxt_hwrm_port_qstats(bp);
00db3cba
VV
9103 bnxt_hwrm_port_qstats_ext(bp);
9104 }
3bdf56c4 9105
0eaa24b9 9106 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
e2dc9b6e 9107 int rc;
0eaa24b9 9108
e2dc9b6e 9109 mutex_lock(&bp->link_lock);
0eaa24b9
MC
9110 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
9111 &bp->sp_event))
9112 bnxt_hwrm_phy_qcaps(bp);
9113
e2dc9b6e
MC
9114 rc = bnxt_update_link(bp, true);
9115 mutex_unlock(&bp->link_lock);
0eaa24b9
MC
9116 if (rc)
9117 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
9118 rc);
9119 }
a1ef4a79
MC
9120 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
9121 int rc;
9122
9123 mutex_lock(&bp->link_lock);
9124 rc = bnxt_update_phy_setting(bp);
9125 mutex_unlock(&bp->link_lock);
9126 if (rc) {
9127 netdev_warn(bp->dev, "update phy settings retry failed\n");
9128 } else {
9129 bp->link_info.phy_retry = false;
9130 netdev_info(bp->dev, "update phy settings retry succeeded\n");
9131 }
9132 }
90c694bb 9133 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
e2dc9b6e
MC
9134 mutex_lock(&bp->link_lock);
9135 bnxt_get_port_module_status(bp);
9136 mutex_unlock(&bp->link_lock);
90c694bb 9137 }
5a84acbe
SP
9138
9139 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
9140 bnxt_tc_flow_stats_work(bp);
9141
ffd77621
MC
9142 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
9143 bnxt_chk_missed_irq(bp);
9144
e2dc9b6e
MC
 9145	/* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
9146 * must be the last functions to be called before exiting.
9147 */
6988bd92
MC
9148 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
9149 bnxt_reset(bp, false);
4cebdcec 9150
fc0f1929
MC
9151 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
9152 bnxt_reset(bp, true);
9153
4cebdcec
MC
9154 smp_mb__before_atomic();
9155 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5
MC
9156}
9157
d1e7925e 9158/* Under rtnl_lock */
98fdbe73
MC
9159int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
9160 int tx_xdp)
d1e7925e
MC
9161{
9162 int max_rx, max_tx, tx_sets = 1;
780baad4 9163 int tx_rings_needed, stats;
8f23d638 9164 int rx_rings = rx;
6fc2ffdf 9165 int cp, vnics, rc;
d1e7925e 9166
d1e7925e
MC
9167 if (tcs)
9168 tx_sets = tcs;
9169
9170 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
9171 if (rc)
9172 return rc;
9173
9174 if (max_rx < rx)
9175 return -ENOMEM;
9176
5f449249 9177 tx_rings_needed = tx * tx_sets + tx_xdp;
d1e7925e
MC
9178 if (max_tx < tx_rings_needed)
9179 return -ENOMEM;
9180
6fc2ffdf
EW
9181 vnics = 1;
9182 if (bp->flags & BNXT_FLAG_RFS)
9183 vnics += rx_rings;
9184
8f23d638
MC
9185 if (bp->flags & BNXT_FLAG_AGG_RINGS)
9186 rx_rings <<= 1;
9187 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
780baad4
VV
9188 stats = cp;
9189 if (BNXT_NEW_RM(bp)) {
11c3ec7b 9190 cp += bnxt_get_ulp_msix_num(bp);
780baad4
VV
9191 stats += bnxt_get_ulp_stat_ctxs(bp);
9192 }
6fc2ffdf 9193 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
780baad4 9194 stats, vnics);
d1e7925e
MC
9195}
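/* Worked example (illustrative only, not part of the driver): checking
 * tx = 4, rx = 4, tcs = 2, tx_xdp = 0 with shared rings, RFS and
 * aggregation enabled gives tx_rings_needed = 4 * 2 + 0 = 8,
 * vnics = 1 + 4 = 5, rx_rings = 4 << 1 = 8 (aggregation doubles the RX
 * rings passed to firmware) and cp = max(8, 4) = 8 completion rings, all
 * of which bnxt_hwrm_check_rings() then validates against firmware limits.
 */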
9196
17086399
SP
9197static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
9198{
9199 if (bp->bar2) {
9200 pci_iounmap(pdev, bp->bar2);
9201 bp->bar2 = NULL;
9202 }
9203
9204 if (bp->bar1) {
9205 pci_iounmap(pdev, bp->bar1);
9206 bp->bar1 = NULL;
9207 }
9208
9209 if (bp->bar0) {
9210 pci_iounmap(pdev, bp->bar0);
9211 bp->bar0 = NULL;
9212 }
9213}
9214
9215static void bnxt_cleanup_pci(struct bnxt *bp)
9216{
9217 bnxt_unmap_bars(bp, bp->pdev);
9218 pci_release_regions(bp->pdev);
9219 pci_disable_device(bp->pdev);
9220}
9221
18775aa8
MC
9222static void bnxt_init_dflt_coal(struct bnxt *bp)
9223{
9224 struct bnxt_coal *coal;
9225
 9226	/* Tick values in microseconds.
9227 * 1 coal_buf x bufs_per_record = 1 completion record.
9228 */
9229 coal = &bp->rx_coal;
9230 coal->coal_ticks = 14;
9231 coal->coal_bufs = 30;
9232 coal->coal_ticks_irq = 1;
9233 coal->coal_bufs_irq = 2;
05abe4dd 9234 coal->idle_thresh = 50;
18775aa8
MC
9235 coal->bufs_per_record = 2;
9236 coal->budget = 64; /* NAPI budget */
9237
9238 coal = &bp->tx_coal;
9239 coal->coal_ticks = 28;
9240 coal->coal_bufs = 30;
9241 coal->coal_ticks_irq = 2;
9242 coal->coal_bufs_irq = 2;
9243 coal->bufs_per_record = 1;
9244
9245 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
9246}
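/* Illustrative reading of the RX defaults above (an interpretation, not
 * part of the driver): with bufs_per_record = 2, coal_bufs = 30 corresponds
 * to roughly 30 / 2 = 15 completion records, so an interrupt is coalesced
 * until about 15 RX completions have arrived or coal_ticks = 14 usec have
 * elapsed, whichever limit is reached first.
 */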
9247
c0c050c5
MC
9248static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
9249{
9250 int rc;
9251 struct bnxt *bp = netdev_priv(dev);
9252
9253 SET_NETDEV_DEV(dev, &pdev->dev);
9254
9255 /* enable device (incl. PCI PM wakeup), and bus-mastering */
9256 rc = pci_enable_device(pdev);
9257 if (rc) {
9258 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9259 goto init_err;
9260 }
9261
9262 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9263 dev_err(&pdev->dev,
9264 "Cannot find PCI device base address, aborting\n");
9265 rc = -ENODEV;
9266 goto init_err_disable;
9267 }
9268
9269 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9270 if (rc) {
9271 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9272 goto init_err_disable;
9273 }
9274
9275 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
9276 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9277 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
9278 goto init_err_disable;
9279 }
9280
9281 pci_set_master(pdev);
9282
9283 bp->dev = dev;
9284 bp->pdev = pdev;
9285
9286 bp->bar0 = pci_ioremap_bar(pdev, 0);
9287 if (!bp->bar0) {
9288 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9289 rc = -ENOMEM;
9290 goto init_err_release;
9291 }
9292
9293 bp->bar1 = pci_ioremap_bar(pdev, 2);
9294 if (!bp->bar1) {
9295 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
9296 rc = -ENOMEM;
9297 goto init_err_release;
9298 }
9299
9300 bp->bar2 = pci_ioremap_bar(pdev, 4);
9301 if (!bp->bar2) {
9302 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
9303 rc = -ENOMEM;
9304 goto init_err_release;
9305 }
9306
6316ea6d
SB
9307 pci_enable_pcie_error_reporting(pdev);
9308
c0c050c5
MC
9309 INIT_WORK(&bp->sp_task, bnxt_sp_task);
9310
9311 spin_lock_init(&bp->ntp_fltr_lock);
697197e5
MC
9312#if BITS_PER_LONG == 32
9313 spin_lock_init(&bp->db_lock);
9314#endif
c0c050c5
MC
9315
9316 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
9317 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
9318
18775aa8 9319 bnxt_init_dflt_coal(bp);
51f30785 9320
e99e88a9 9321 timer_setup(&bp->timer, bnxt_timer, 0);
c0c050c5
MC
9322 bp->current_interval = BNXT_TIMER_INTERVAL;
9323
caefe526 9324 clear_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
9325 return 0;
9326
9327init_err_release:
17086399 9328 bnxt_unmap_bars(bp, pdev);
c0c050c5
MC
9329 pci_release_regions(pdev);
9330
9331init_err_disable:
9332 pci_disable_device(pdev);
9333
9334init_err:
9335 return rc;
9336}
9337
9338/* rtnl_lock held */
9339static int bnxt_change_mac_addr(struct net_device *dev, void *p)
9340{
9341 struct sockaddr *addr = p;
1fc2cfd0
JH
9342 struct bnxt *bp = netdev_priv(dev);
9343 int rc = 0;
c0c050c5
MC
9344
9345 if (!is_valid_ether_addr(addr->sa_data))
9346 return -EADDRNOTAVAIL;
9347
c1a7bdff
MC
9348 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
9349 return 0;
9350
28ea334b 9351 rc = bnxt_approve_mac(bp, addr->sa_data, true);
84c33dd3
MC
9352 if (rc)
9353 return rc;
bdd4347b 9354
c0c050c5 9355 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1fc2cfd0
JH
9356 if (netif_running(dev)) {
9357 bnxt_close_nic(bp, false, false);
9358 rc = bnxt_open_nic(bp, false, false);
9359 }
c0c050c5 9360
1fc2cfd0 9361 return rc;
c0c050c5
MC
9362}
9363
9364/* rtnl_lock held */
9365static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
9366{
9367 struct bnxt *bp = netdev_priv(dev);
9368
c0c050c5
MC
9369 if (netif_running(dev))
9370 bnxt_close_nic(bp, false, false);
9371
9372 dev->mtu = new_mtu;
9373 bnxt_set_ring_params(bp);
9374
9375 if (netif_running(dev))
9376 return bnxt_open_nic(bp, false, false);
9377
9378 return 0;
9379}
9380
c5e3deb8 9381int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
c0c050c5
MC
9382{
9383 struct bnxt *bp = netdev_priv(dev);
3ffb6a39 9384 bool sh = false;
d1e7925e 9385 int rc;
16e5cc64 9386
c0c050c5 9387 if (tc > bp->max_tc) {
b451c8b6 9388 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
c0c050c5
MC
9389 tc, bp->max_tc);
9390 return -EINVAL;
9391 }
9392
9393 if (netdev_get_num_tc(dev) == tc)
9394 return 0;
9395
3ffb6a39
MC
9396 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
9397 sh = true;
9398
98fdbe73
MC
9399 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
9400 sh, tc, bp->tx_nr_rings_xdp);
d1e7925e
MC
9401 if (rc)
9402 return rc;
c0c050c5
MC
9403
9404 /* Needs to close the device and do hw resource re-allocations */
9405 if (netif_running(bp->dev))
9406 bnxt_close_nic(bp, true, false);
9407
9408 if (tc) {
9409 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
9410 netdev_set_num_tc(dev, tc);
9411 } else {
9412 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
9413 netdev_reset_tc(dev);
9414 }
87e9b377 9415 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
3ffb6a39
MC
9416 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9417 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5
MC
9418
9419 if (netif_running(bp->dev))
9420 return bnxt_open_nic(bp, true, false);
9421
9422 return 0;
9423}
9424
9e0fd15d
JP
9425static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9426 void *cb_priv)
c5e3deb8 9427{
9e0fd15d 9428 struct bnxt *bp = cb_priv;
de4784ca 9429
312324f1
JK
9430 if (!bnxt_tc_flower_enabled(bp) ||
9431 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
38cf0426 9432 return -EOPNOTSUPP;
c5e3deb8 9433
9e0fd15d
JP
9434 switch (type) {
9435 case TC_SETUP_CLSFLOWER:
9436 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
9437 default:
9438 return -EOPNOTSUPP;
9439 }
9440}
9441
9442static int bnxt_setup_tc_block(struct net_device *dev,
9443 struct tc_block_offload *f)
9444{
9445 struct bnxt *bp = netdev_priv(dev);
9446
9447 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9448 return -EOPNOTSUPP;
9449
9450 switch (f->command) {
9451 case TC_BLOCK_BIND:
9452 return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
60513bd8 9453 bp, bp, f->extack);
9e0fd15d
JP
9454 case TC_BLOCK_UNBIND:
9455 tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
9456 return 0;
9457 default:
9458 return -EOPNOTSUPP;
9459 }
2ae7408f
SP
9460}
9461
9462static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
9463 void *type_data)
9464{
9465 switch (type) {
9e0fd15d
JP
9466 case TC_SETUP_BLOCK:
9467 return bnxt_setup_tc_block(dev, type_data);
575ed7d3 9468 case TC_SETUP_QDISC_MQPRIO: {
2ae7408f
SP
9469 struct tc_mqprio_qopt *mqprio = type_data;
9470
9471 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
56f36acd 9472
2ae7408f
SP
9473 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
9474 }
9475 default:
9476 return -EOPNOTSUPP;
9477 }
c5e3deb8
MC
9478}
9479
c0c050c5
MC
9480#ifdef CONFIG_RFS_ACCEL
9481static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
9482 struct bnxt_ntuple_filter *f2)
9483{
9484 struct flow_keys *keys1 = &f1->fkeys;
9485 struct flow_keys *keys2 = &f2->fkeys;
9486
9487 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
9488 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
9489 keys1->ports.ports == keys2->ports.ports &&
9490 keys1->basic.ip_proto == keys2->basic.ip_proto &&
9491 keys1->basic.n_proto == keys2->basic.n_proto &&
61aad724 9492 keys1->control.flags == keys2->control.flags &&
a54c4d74
MC
9493 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
9494 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
c0c050c5
MC
9495 return true;
9496
9497 return false;
9498}
9499
9500static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
9501 u16 rxq_index, u32 flow_id)
9502{
9503 struct bnxt *bp = netdev_priv(dev);
9504 struct bnxt_ntuple_filter *fltr, *new_fltr;
9505 struct flow_keys *fkeys;
9506 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
a54c4d74 9507 int rc = 0, idx, bit_id, l2_idx = 0;
c0c050c5
MC
9508 struct hlist_head *head;
9509
a54c4d74
MC
9510 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
9511 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9512 int off = 0, j;
9513
9514 netif_addr_lock_bh(dev);
9515 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
9516 if (ether_addr_equal(eth->h_dest,
9517 vnic->uc_list + off)) {
9518 l2_idx = j + 1;
9519 break;
9520 }
9521 }
9522 netif_addr_unlock_bh(dev);
9523 if (!l2_idx)
9524 return -EINVAL;
9525 }
c0c050c5
MC
9526 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
9527 if (!new_fltr)
9528 return -ENOMEM;
9529
9530 fkeys = &new_fltr->fkeys;
9531 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
9532 rc = -EPROTONOSUPPORT;
9533 goto err_free;
9534 }
9535
dda0e746
MC
9536 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
9537 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
c0c050c5
MC
9538 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
9539 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
9540 rc = -EPROTONOSUPPORT;
9541 goto err_free;
9542 }
dda0e746
MC
9543 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
9544 bp->hwrm_spec_code < 0x10601) {
9545 rc = -EPROTONOSUPPORT;
9546 goto err_free;
9547 }
61aad724
MC
9548 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
9549 bp->hwrm_spec_code < 0x10601) {
9550 rc = -EPROTONOSUPPORT;
9551 goto err_free;
9552 }
c0c050c5 9553
a54c4d74 9554 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
c0c050c5
MC
9555 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
9556
9557 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
9558 head = &bp->ntp_fltr_hash_tbl[idx];
9559 rcu_read_lock();
9560 hlist_for_each_entry_rcu(fltr, head, hash) {
9561 if (bnxt_fltr_match(fltr, new_fltr)) {
9562 rcu_read_unlock();
9563 rc = 0;
9564 goto err_free;
9565 }
9566 }
9567 rcu_read_unlock();
9568
9569 spin_lock_bh(&bp->ntp_fltr_lock);
84e86b98
MC
9570 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
9571 BNXT_NTP_FLTR_MAX_FLTR, 0);
9572 if (bit_id < 0) {
c0c050c5
MC
9573 spin_unlock_bh(&bp->ntp_fltr_lock);
9574 rc = -ENOMEM;
9575 goto err_free;
9576 }
9577
84e86b98 9578 new_fltr->sw_id = (u16)bit_id;
c0c050c5 9579 new_fltr->flow_id = flow_id;
a54c4d74 9580 new_fltr->l2_fltr_idx = l2_idx;
c0c050c5
MC
9581 new_fltr->rxq = rxq_index;
9582 hlist_add_head_rcu(&new_fltr->hash, head);
9583 bp->ntp_fltr_count++;
9584 spin_unlock_bh(&bp->ntp_fltr_lock);
9585
9586 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
c213eae8 9587 bnxt_queue_sp_work(bp);
c0c050c5
MC
9588
9589 return new_fltr->sw_id;
9590
9591err_free:
9592 kfree(new_fltr);
9593 return rc;
9594}
9595
9596static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9597{
9598 int i;
9599
9600 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
9601 struct hlist_head *head;
9602 struct hlist_node *tmp;
9603 struct bnxt_ntuple_filter *fltr;
9604 int rc;
9605
9606 head = &bp->ntp_fltr_hash_tbl[i];
9607 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
9608 bool del = false;
9609
9610 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
9611 if (rps_may_expire_flow(bp->dev, fltr->rxq,
9612 fltr->flow_id,
9613 fltr->sw_id)) {
9614 bnxt_hwrm_cfa_ntuple_filter_free(bp,
9615 fltr);
9616 del = true;
9617 }
9618 } else {
9619 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
9620 fltr);
9621 if (rc)
9622 del = true;
9623 else
9624 set_bit(BNXT_FLTR_VALID, &fltr->state);
9625 }
9626
9627 if (del) {
9628 spin_lock_bh(&bp->ntp_fltr_lock);
9629 hlist_del_rcu(&fltr->hash);
9630 bp->ntp_fltr_count--;
9631 spin_unlock_bh(&bp->ntp_fltr_lock);
9632 synchronize_rcu();
9633 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
9634 kfree(fltr);
9635 }
9636 }
9637 }
19241368
JH
9638 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
 9639		netdev_info(bp->dev, "Received PF driver unload event!\n");
c0c050c5
MC
9640}
9641
9642#else
9643
9644static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9645{
9646}
9647
9648#endif /* CONFIG_RFS_ACCEL */
9649
ad51b8e9
AD
9650static void bnxt_udp_tunnel_add(struct net_device *dev,
9651 struct udp_tunnel_info *ti)
c0c050c5
MC
9652{
9653 struct bnxt *bp = netdev_priv(dev);
9654
ad51b8e9 9655 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
c0c050c5
MC
9656 return;
9657
ad51b8e9 9658 if (!netif_running(dev))
c0c050c5
MC
9659 return;
9660
ad51b8e9
AD
9661 switch (ti->type) {
9662 case UDP_TUNNEL_TYPE_VXLAN:
9663 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
9664 return;
c0c050c5 9665
ad51b8e9
AD
9666 bp->vxlan_port_cnt++;
9667 if (bp->vxlan_port_cnt == 1) {
9668 bp->vxlan_port = ti->port;
9669 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
c213eae8 9670 bnxt_queue_sp_work(bp);
ad51b8e9
AD
9671 }
9672 break;
7cdd5fc3
AD
9673 case UDP_TUNNEL_TYPE_GENEVE:
9674 if (bp->nge_port_cnt && bp->nge_port != ti->port)
9675 return;
9676
9677 bp->nge_port_cnt++;
9678 if (bp->nge_port_cnt == 1) {
9679 bp->nge_port = ti->port;
9680 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
9681 }
9682 break;
ad51b8e9
AD
9683 default:
9684 return;
c0c050c5 9685 }
ad51b8e9 9686
c213eae8 9687 bnxt_queue_sp_work(bp);
c0c050c5
MC
9688}
9689
ad51b8e9
AD
9690static void bnxt_udp_tunnel_del(struct net_device *dev,
9691 struct udp_tunnel_info *ti)
c0c050c5
MC
9692{
9693 struct bnxt *bp = netdev_priv(dev);
9694
ad51b8e9 9695 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
c0c050c5
MC
9696 return;
9697
ad51b8e9 9698 if (!netif_running(dev))
c0c050c5
MC
9699 return;
9700
ad51b8e9
AD
9701 switch (ti->type) {
9702 case UDP_TUNNEL_TYPE_VXLAN:
9703 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
9704 return;
c0c050c5
MC
9705 bp->vxlan_port_cnt--;
9706
ad51b8e9
AD
9707 if (bp->vxlan_port_cnt != 0)
9708 return;
9709
9710 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
9711 break;
7cdd5fc3
AD
9712 case UDP_TUNNEL_TYPE_GENEVE:
9713 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
9714 return;
9715 bp->nge_port_cnt--;
9716
9717 if (bp->nge_port_cnt != 0)
9718 return;
9719
9720 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
9721 break;
ad51b8e9
AD
9722 default:
9723 return;
c0c050c5 9724 }
ad51b8e9 9725
c213eae8 9726 bnxt_queue_sp_work(bp);
c0c050c5
MC
9727}
9728
39d8ba2e
MC
9729static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9730 struct net_device *dev, u32 filter_mask,
9731 int nlflags)
9732{
9733 struct bnxt *bp = netdev_priv(dev);
9734
9735 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
9736 nlflags, filter_mask, NULL);
9737}
9738
9739static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
2fd527b7 9740 u16 flags, struct netlink_ext_ack *extack)
39d8ba2e
MC
9741{
9742 struct bnxt *bp = netdev_priv(dev);
9743 struct nlattr *attr, *br_spec;
9744 int rem, rc = 0;
9745
9746 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
9747 return -EOPNOTSUPP;
9748
9749 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9750 if (!br_spec)
9751 return -EINVAL;
9752
9753 nla_for_each_nested(attr, br_spec, rem) {
9754 u16 mode;
9755
9756 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9757 continue;
9758
9759 if (nla_len(attr) < sizeof(mode))
9760 return -EINVAL;
9761
9762 mode = nla_get_u16(attr);
9763 if (mode == bp->br_mode)
9764 break;
9765
9766 rc = bnxt_hwrm_set_br_mode(bp, mode);
9767 if (!rc)
9768 bp->br_mode = mode;
9769 break;
9770 }
9771 return rc;
9772}
9773
c124a62f
SP
9774static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
9775 size_t len)
9776{
9777 struct bnxt *bp = netdev_priv(dev);
9778 int rc;
9779
 9780	/* The PF and its VF-reps only support the switchdev framework */
9781 if (!BNXT_PF(bp))
9782 return -EOPNOTSUPP;
9783
53f70b8b 9784 rc = snprintf(buf, len, "p%d", bp->pf.port_id);
c124a62f
SP
9785
9786 if (rc >= len)
9787 return -EOPNOTSUPP;
9788 return 0;
9789}
9790
9791int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
9792{
9793 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
9794 return -EOPNOTSUPP;
9795
 9796	/* The PF and its VF-reps only support the switchdev framework */
9797 if (!BNXT_PF(bp))
9798 return -EOPNOTSUPP;
9799
9800 switch (attr->id) {
9801 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
dd4ea1da
SP
9802 attr->u.ppid.id_len = sizeof(bp->switch_id);
9803 memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
c124a62f
SP
9804 break;
9805 default:
9806 return -EOPNOTSUPP;
9807 }
9808 return 0;
9809}
9810
9811static int bnxt_swdev_port_attr_get(struct net_device *dev,
9812 struct switchdev_attr *attr)
9813{
9814 return bnxt_port_attr_get(netdev_priv(dev), attr);
9815}
9816
9817static const struct switchdev_ops bnxt_switchdev_ops = {
9818 .switchdev_port_attr_get = bnxt_swdev_port_attr_get
9819};
9820
c0c050c5
MC
9821static const struct net_device_ops bnxt_netdev_ops = {
9822 .ndo_open = bnxt_open,
9823 .ndo_start_xmit = bnxt_start_xmit,
9824 .ndo_stop = bnxt_close,
9825 .ndo_get_stats64 = bnxt_get_stats64,
9826 .ndo_set_rx_mode = bnxt_set_rx_mode,
9827 .ndo_do_ioctl = bnxt_ioctl,
9828 .ndo_validate_addr = eth_validate_addr,
9829 .ndo_set_mac_address = bnxt_change_mac_addr,
9830 .ndo_change_mtu = bnxt_change_mtu,
9831 .ndo_fix_features = bnxt_fix_features,
9832 .ndo_set_features = bnxt_set_features,
9833 .ndo_tx_timeout = bnxt_tx_timeout,
9834#ifdef CONFIG_BNXT_SRIOV
9835 .ndo_get_vf_config = bnxt_get_vf_config,
9836 .ndo_set_vf_mac = bnxt_set_vf_mac,
9837 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
9838 .ndo_set_vf_rate = bnxt_set_vf_bw,
9839 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
9840 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
746df139 9841 .ndo_set_vf_trust = bnxt_set_vf_trust,
c0c050c5
MC
9842#endif
9843 .ndo_setup_tc = bnxt_setup_tc,
9844#ifdef CONFIG_RFS_ACCEL
9845 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
9846#endif
ad51b8e9
AD
9847 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
9848 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
f4e63525 9849 .ndo_bpf = bnxt_xdp,
39d8ba2e
MC
9850 .ndo_bridge_getlink = bnxt_bridge_getlink,
9851 .ndo_bridge_setlink = bnxt_bridge_setlink,
c124a62f 9852 .ndo_get_phys_port_name = bnxt_get_phys_port_name
c0c050c5
MC
9853};
9854
9855static void bnxt_remove_one(struct pci_dev *pdev)
9856{
9857 struct net_device *dev = pci_get_drvdata(pdev);
9858 struct bnxt *bp = netdev_priv(dev);
9859
4ab0c6a8 9860 if (BNXT_PF(bp)) {
c0c050c5 9861 bnxt_sriov_disable(bp);
4ab0c6a8
SP
9862 bnxt_dl_unregister(bp);
9863 }
c0c050c5 9864
6316ea6d 9865 pci_disable_pcie_error_reporting(pdev);
c0c050c5 9866 unregister_netdev(dev);
2ae7408f 9867 bnxt_shutdown_tc(bp);
c213eae8 9868 bnxt_cancel_sp_work(bp);
c0c050c5
MC
9869 bp->sp_event = 0;
9870
7809592d 9871 bnxt_clear_int_mode(bp);
be58a0da 9872 bnxt_hwrm_func_drv_unrgtr(bp);
c0c050c5 9873 bnxt_free_hwrm_resources(bp);
e605db80 9874 bnxt_free_hwrm_short_cmd_req(bp);
eb513658 9875 bnxt_ethtool_free(bp);
7df4ae9f 9876 bnxt_dcb_free(bp);
a588e458
MC
9877 kfree(bp->edev);
9878 bp->edev = NULL;
98f04cf0
MC
9879 bnxt_free_ctx_mem(bp);
9880 kfree(bp->ctx);
9881 bp->ctx = NULL;
17086399 9882 bnxt_cleanup_pci(bp);
fd3ab1c7 9883 bnxt_free_port_stats(bp);
c0c050c5 9884 free_netdev(dev);
c0c050c5
MC
9885}
9886
9887static int bnxt_probe_phy(struct bnxt *bp)
9888{
9889 int rc = 0;
9890 struct bnxt_link_info *link_info = &bp->link_info;
c0c050c5 9891
170ce013
MC
9892 rc = bnxt_hwrm_phy_qcaps(bp);
9893 if (rc) {
9894 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
9895 rc);
9896 return rc;
9897 }
e2dc9b6e 9898 mutex_init(&bp->link_lock);
170ce013 9899
c0c050c5
MC
9900 rc = bnxt_update_link(bp, false);
9901 if (rc) {
9902 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
9903 rc);
9904 return rc;
9905 }
9906
93ed8117
MC
9907 /* Older firmware does not have supported_auto_speeds, so assume
9908 * that all supported speeds can be autonegotiated.
9909 */
9910 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
9911 link_info->support_auto_speeds = link_info->support_speeds;
9912
c0c050c5 9913	/* Initialize the ethtool settings copy with NVM settings */
0d8abf02 9914 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
c9ee9516
MC
9915 link_info->autoneg = BNXT_AUTONEG_SPEED;
9916 if (bp->hwrm_spec_code >= 0x10201) {
9917 if (link_info->auto_pause_setting &
9918 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
9919 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
9920 } else {
9921 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
9922 }
0d8abf02 9923 link_info->advertising = link_info->auto_link_speeds;
0d8abf02
MC
9924 } else {
9925 link_info->req_link_speed = link_info->force_link_speed;
9926 link_info->req_duplex = link_info->duplex_setting;
c0c050c5 9927 }
c9ee9516
MC
9928 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
9929 link_info->req_flow_ctrl =
9930 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
9931 else
9932 link_info->req_flow_ctrl = link_info->force_pause_setting;
c0c050c5
MC
9933 return rc;
9934}
9935
9936static int bnxt_get_max_irq(struct pci_dev *pdev)
9937{
9938 u16 ctrl;
9939
9940 if (!pdev->msix_cap)
9941 return 1;
9942
9943 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
9944 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
9945}
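/* Illustrative note (not part of the driver): the MSI-X table size field in
 * the MSI-X message control word is encoded as N - 1, so e.g. a masked
 * value of 0x003f means the function supports 64 MSI-X vectors, which is
 * why 1 is added to the PCI_MSIX_FLAGS_QSIZE field above.
 */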
9946
6e6c5a57
MC
9947static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
9948 int *max_cp)
c0c050c5 9949{
6a4f2947 9950 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
e30fbc33 9951 int max_ring_grps = 0, max_irq;
c0c050c5 9952
6a4f2947
MC
9953 *max_tx = hw_resc->max_tx_rings;
9954 *max_rx = hw_resc->max_rx_rings;
e30fbc33
MC
9955 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
9956 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
9957 bnxt_get_ulp_msix_num(bp),
c027c6b4 9958 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
e30fbc33
MC
9959 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9960 *max_cp = min_t(int, *max_cp, max_irq);
6a4f2947 9961 max_ring_grps = hw_resc->max_hw_ring_grps;
76595193
PS
9962 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
9963 *max_cp -= 1;
9964 *max_rx -= 2;
9965 }
c0c050c5
MC
9966 if (bp->flags & BNXT_FLAG_AGG_RINGS)
9967 *max_rx >>= 1;
e30fbc33
MC
9968 if (bp->flags & BNXT_FLAG_CHIP_P5) {
9969 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
 9970		/* On P5 chips, the max_cp output param should be the available NQ count */
9971 *max_cp = max_irq;
9972 }
b72d4a68 9973 *max_rx = min_t(int, *max_rx, max_ring_grps);
6e6c5a57
MC
9974}
9975
9976int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
9977{
9978 int rx, tx, cp;
9979
9980 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
78f058a4
MC
9981 *max_rx = rx;
9982 *max_tx = tx;
6e6c5a57
MC
9983 if (!rx || !tx || !cp)
9984 return -ENOMEM;
9985
6e6c5a57
MC
9986 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
9987}
9988
e4060d30
MC
9989static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
9990 bool shared)
9991{
9992 int rc;
9993
9994 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
bdbd1eb5
MC
9995 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
9996 /* Not enough rings, try disabling agg rings. */
9997 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
9998 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
07f4fde5
MC
9999 if (rc) {
10000 /* set BNXT_FLAG_AGG_RINGS back for consistency */
10001 bp->flags |= BNXT_FLAG_AGG_RINGS;
bdbd1eb5 10002 return rc;
07f4fde5 10003 }
bdbd1eb5 10004 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
1054aee8
MC
10005 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10006 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bdbd1eb5
MC
10007 bnxt_set_ring_params(bp);
10008 }
e4060d30
MC
10009
10010 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
10011 int max_cp, max_stat, max_irq;
10012
10013 /* Reserve minimum resources for RoCE */
10014 max_cp = bnxt_get_max_func_cp_rings(bp);
10015 max_stat = bnxt_get_max_func_stat_ctxs(bp);
10016 max_irq = bnxt_get_max_func_irqs(bp);
10017 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
10018 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
10019 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
10020 return 0;
10021
10022 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
10023 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
10024 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
10025 max_cp = min_t(int, max_cp, max_irq);
10026 max_cp = min_t(int, max_cp, max_stat);
10027 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
10028 if (rc)
10029 rc = 0;
10030 }
10031 return rc;
10032}
10033
58ea801a
MC
10034/* In the initial default shared ring setting, each shared ring must have
10035 * an RX/TX ring pair.
10036 */
10037static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
10038{
10039 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
10040 bp->rx_nr_rings = bp->cp_nr_rings;
10041 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
10042 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10043}
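/* Illustrative example (not part of the driver): with shared rings and an
 * initial pick of 6 TX rings per TC and 4 RX rings,
 * bnxt_trim_dflt_sh_rings() sets cp_nr_rings = min(6, 4) = 4 and then
 * forces rx_nr_rings, tx_nr_rings_per_tc and tx_nr_rings all to 4, so each
 * shared completion ring ends up with exactly one RX/TX ring pair.
 */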
10044
702c221c 10045static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
6e6c5a57
MC
10046{
10047 int dflt_rings, max_rx_rings, max_tx_rings, rc;
6e6c5a57 10048
2773dfb2
MC
10049 if (!bnxt_can_reserve_rings(bp))
10050 return 0;
10051
6e6c5a57
MC
10052 if (sh)
10053 bp->flags |= BNXT_FLAG_SHARED_RINGS;
10054 dflt_rings = netif_get_num_default_rss_queues();
1d3ef13d
MC
10055 /* Reduce default rings on multi-port cards so that total default
10056 * rings do not exceed CPU count.
10057 */
10058 if (bp->port_count > 1) {
10059 int max_rings =
10060 max_t(int, num_online_cpus() / bp->port_count, 1);
10061
10062 dflt_rings = min_t(int, dflt_rings, max_rings);
10063 }
e4060d30 10064 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6e6c5a57
MC
10065 if (rc)
10066 return rc;
10067 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
10068 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
58ea801a
MC
10069 if (sh)
10070 bnxt_trim_dflt_sh_rings(bp);
10071 else
10072 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
10073 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
391be5c2 10074
674f50a5 10075 rc = __bnxt_reserve_rings(bp);
391be5c2
MC
10076 if (rc)
10077 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
58ea801a
MC
10078 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10079 if (sh)
10080 bnxt_trim_dflt_sh_rings(bp);
391be5c2 10081
674f50a5
MC
10082 /* Rings may have been trimmed, re-reserve the trimmed rings. */
10083 if (bnxt_need_reserve_rings(bp)) {
10084 rc = __bnxt_reserve_rings(bp);
10085 if (rc)
10086 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
10087 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10088 }
76595193
PS
10089 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10090 bp->rx_nr_rings++;
10091 bp->cp_nr_rings++;
10092 }
6e6c5a57 10093 return rc;
c0c050c5
MC
10094}
10095
47558acd
MC
10096static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
10097{
10098 int rc;
10099
10100 if (bp->tx_nr_rings)
10101 return 0;
10102
6b95c3e9
MC
10103 bnxt_ulp_irq_stop(bp);
10104 bnxt_clear_int_mode(bp);
47558acd
MC
10105 rc = bnxt_set_dflt_rings(bp, true);
10106 if (rc) {
10107 netdev_err(bp->dev, "Not enough rings available.\n");
6b95c3e9 10108 goto init_dflt_ring_err;
47558acd
MC
10109 }
10110 rc = bnxt_init_int_mode(bp);
10111 if (rc)
6b95c3e9
MC
10112 goto init_dflt_ring_err;
10113
47558acd
MC
10114 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10115 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
10116 bp->flags |= BNXT_FLAG_RFS;
10117 bp->dev->features |= NETIF_F_NTUPLE;
10118 }
6b95c3e9
MC
10119init_dflt_ring_err:
10120 bnxt_ulp_irq_restart(bp, rc);
10121 return rc;
47558acd
MC
10122}
10123
80fcaf46 10124int bnxt_restore_pf_fw_resources(struct bnxt *bp)
7b08f661 10125{
80fcaf46
MC
10126 int rc;
10127
7b08f661
MC
10128 ASSERT_RTNL();
10129 bnxt_hwrm_func_qcaps(bp);
1a037782
VD
10130
10131 if (netif_running(bp->dev))
10132 __bnxt_close_nic(bp, true, false);
10133
ec86f14e 10134 bnxt_ulp_irq_stop(bp);
80fcaf46
MC
10135 bnxt_clear_int_mode(bp);
10136 rc = bnxt_init_int_mode(bp);
ec86f14e 10137 bnxt_ulp_irq_restart(bp, rc);
1a037782
VD
10138
10139 if (netif_running(bp->dev)) {
10140 if (rc)
10141 dev_close(bp->dev);
10142 else
10143 rc = bnxt_open_nic(bp, true, false);
10144 }
10145
80fcaf46 10146 return rc;
7b08f661
MC
10147}
10148
a22a6ac2
MC
10149static int bnxt_init_mac_addr(struct bnxt *bp)
10150{
10151 int rc = 0;
10152
10153 if (BNXT_PF(bp)) {
10154 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
10155 } else {
10156#ifdef CONFIG_BNXT_SRIOV
10157 struct bnxt_vf_info *vf = &bp->vf;
28ea334b 10158 bool strict_approval = true;
a22a6ac2
MC
10159
10160 if (is_valid_ether_addr(vf->mac_addr)) {
91cdda40 10161 /* overwrite netdev dev_addr with admin VF MAC */
a22a6ac2 10162 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
28ea334b
MC
10163 /* Older PF driver or firmware may not approve this
10164 * correctly.
10165 */
10166 strict_approval = false;
a22a6ac2
MC
10167 } else {
10168 eth_hw_addr_random(bp->dev);
a22a6ac2 10169 }
28ea334b 10170 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
a22a6ac2
MC
10171#endif
10172 }
10173 return rc;
10174}
10175
c0c050c5
MC
10176static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10177{
10178 static int version_printed;
10179 struct net_device *dev;
10180 struct bnxt *bp;
6e6c5a57 10181 int rc, max_irqs;
c0c050c5 10182
4e00338a 10183 if (pci_is_bridge(pdev))
fa853dda
PS
10184 return -ENODEV;
10185
c0c050c5
MC
10186 if (version_printed++ == 0)
10187 pr_info("%s", version);
10188
10189 max_irqs = bnxt_get_max_irq(pdev);
10190 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
10191 if (!dev)
10192 return -ENOMEM;
10193
10194 bp = netdev_priv(dev);
9c1fabdf 10195 bnxt_set_max_func_irqs(bp, max_irqs);
c0c050c5
MC
10196
10197 if (bnxt_vf_pciid(ent->driver_data))
10198 bp->flags |= BNXT_FLAG_VF;
10199
2bcfa6f6 10200 if (pdev->msix_cap)
c0c050c5 10201 bp->flags |= BNXT_FLAG_MSIX_CAP;
c0c050c5
MC
10202
10203 rc = bnxt_init_board(pdev, dev);
10204 if (rc < 0)
10205 goto init_err_free;
10206
10207 dev->netdev_ops = &bnxt_netdev_ops;
10208 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
10209 dev->ethtool_ops = &bnxt_ethtool_ops;
bc88055a 10210 SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
c0c050c5
MC
10211 pci_set_drvdata(pdev, dev);
10212
3e8060fa
PS
10213 rc = bnxt_alloc_hwrm_resources(bp);
10214 if (rc)
17086399 10215 goto init_err_pci_clean;
3e8060fa
PS
10216
10217 mutex_init(&bp->hwrm_cmd_lock);
10218 rc = bnxt_hwrm_ver_get(bp);
10219 if (rc)
17086399 10220 goto init_err_pci_clean;
3e8060fa 10221
1dfddc41
MC
10222 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10223 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
e605db80
DK
10224 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10225 if (rc)
10226 goto init_err_pci_clean;
10227 }
10228
e38287b7
MC
10229 if (BNXT_CHIP_P5(bp))
10230 bp->flags |= BNXT_FLAG_CHIP_P5;
10231
3c2217a6
MC
10232 rc = bnxt_hwrm_func_reset(bp);
10233 if (rc)
10234 goto init_err_pci_clean;
10235
5ac67d8b
RS
10236 bnxt_hwrm_fw_set_time(bp);
10237
c0c050c5
MC
10238 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10239 NETIF_F_TSO | NETIF_F_TSO6 |
10240 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7e13318d 10241 NETIF_F_GSO_IPXIP4 |
152971ee
AD
10242 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10243 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
3e8060fa
PS
10244 NETIF_F_RXCSUM | NETIF_F_GRO;
10245
e38287b7 10246 if (BNXT_SUPPORTS_TPA(bp))
3e8060fa 10247 dev->hw_features |= NETIF_F_LRO;
c0c050c5 10248
c0c050c5
MC
10249 dev->hw_enc_features =
10250 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10251 NETIF_F_TSO | NETIF_F_TSO6 |
10252 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
152971ee 10253 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7e13318d 10254 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
152971ee
AD
10255 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
10256 NETIF_F_GSO_GRE_CSUM;
c0c050c5
MC
10257 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
10258 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
10259 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
e38287b7 10260 if (BNXT_SUPPORTS_TPA(bp))
1054aee8 10261 dev->hw_features |= NETIF_F_GRO_HW;
c0c050c5 10262 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
1054aee8
MC
10263 if (dev->features & NETIF_F_GRO_HW)
10264 dev->features &= ~NETIF_F_LRO;
c0c050c5
MC
10265 dev->priv_flags |= IFF_UNICAST_FLT;
10266
10267#ifdef CONFIG_BNXT_SRIOV
10268 init_waitqueue_head(&bp->sriov_cfg_wait);
4ab0c6a8 10269 mutex_init(&bp->sriov_lock);
c0c050c5 10270#endif
e38287b7
MC
10271 if (BNXT_SUPPORTS_TPA(bp)) {
10272 bp->gro_func = bnxt_gro_func_5730x;
10273 if (BNXT_CHIP_P4(bp))
10274 bp->gro_func = bnxt_gro_func_5731x;
10275 }
10276 if (!BNXT_CHIP_P4_PLUS(bp))
434c975a 10277 bp->flags |= BNXT_FLAG_DOUBLE_DB;
309369c9 10278
c0c050c5
MC
10279 rc = bnxt_hwrm_func_drv_rgtr(bp);
10280 if (rc)
17086399 10281 goto init_err_pci_clean;
c0c050c5 10282
a1653b13
MC
10283 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
10284 if (rc)
17086399 10285 goto init_err_pci_clean;
a1653b13 10286
a588e458
MC
10287 bp->ulp_probe = bnxt_ulp_probe;
10288
98f04cf0
MC
10289 rc = bnxt_hwrm_queue_qportcfg(bp);
10290 if (rc) {
10291 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
10292 rc);
10293 rc = -1;
10294 goto init_err_pci_clean;
10295 }
c0c050c5
MC
10296 /* Get the MAX capabilities for this function */
10297 rc = bnxt_hwrm_func_qcaps(bp);
10298 if (rc) {
10299 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10300 rc);
10301 rc = -1;
17086399 10302 goto init_err_pci_clean;
c0c050c5 10303 }
a22a6ac2
MC
10304 rc = bnxt_init_mac_addr(bp);
10305 if (rc) {
10306 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
10307 rc = -EADDRNOTAVAIL;
10308 goto init_err_pci_clean;
10309 }
c0c050c5 10310
567b2abe 10311 bnxt_hwrm_func_qcfg(bp);
6ba99038 10312 bnxt_hwrm_vnic_qcaps(bp);
5ad2cbee 10313 bnxt_hwrm_port_led_qcaps(bp);
eb513658 10314 bnxt_ethtool_init(bp);
87fe6032 10315 bnxt_dcb_init(bp);
567b2abe 10316
7eb9bb3a
MC
10317 /* MTU range: 60 - FW defined max */
10318 dev->min_mtu = ETH_ZLEN;
10319 dev->max_mtu = bp->max_mtu;
10320
d5430d31
MC
10321 rc = bnxt_probe_phy(bp);
10322 if (rc)
10323 goto init_err_pci_clean;
10324
c61fb99c 10325 bnxt_set_rx_skb_mode(bp, false);
c0c050c5
MC
10326 bnxt_set_tpa_flags(bp);
10327 bnxt_set_ring_params(bp);
702c221c 10328 rc = bnxt_set_dflt_rings(bp, true);
bdbd1eb5
MC
10329 if (rc) {
10330 netdev_err(bp->dev, "Not enough rings available.\n");
10331 rc = -ENOMEM;
17086399 10332 goto init_err_pci_clean;
bdbd1eb5 10333 }
c0c050c5 10334
87da7f79
MC
10335 /* Default RSS hash cfg. */
10336 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10337 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10338 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10339 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
e38287b7 10340 if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
87da7f79
MC
10341 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10342 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10343 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10344 }
10345
8079e8f1 10346 if (bnxt_rfs_supported(bp)) {
2bcfa6f6
MC
10347 dev->hw_features |= NETIF_F_NTUPLE;
10348 if (bnxt_rfs_capable(bp)) {
10349 bp->flags |= BNXT_FLAG_RFS;
10350 dev->features |= NETIF_F_NTUPLE;
10351 }
10352 }
10353
c0c050c5
MC
10354 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
10355 bp->flags |= BNXT_FLAG_STRIP_VLAN;
10356
7809592d 10357 rc = bnxt_init_int_mode(bp);
c0c050c5 10358 if (rc)
17086399 10359 goto init_err_pci_clean;
c0c050c5 10360
832aed16
MC
10361 /* No TC has been set yet and rings may have been trimmed due to
10362 * limited MSIX, so we re-initialize the TX rings per TC.
10363 */
10364 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10365
c1ef146a 10366 bnxt_get_wol_settings(bp);
d196ece7
MC
10367 if (bp->flags & BNXT_FLAG_WOL_CAP)
10368 device_set_wakeup_enable(&pdev->dev, bp->wol);
10369 else
10370 device_set_wakeup_capable(&pdev->dev, false);
c1ef146a 10371
c3480a60
MC
10372 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10373
74706afa
MC
10374 bnxt_hwrm_coal_params_qcaps(bp);
10375
c213eae8
MC
10376 if (BNXT_PF(bp)) {
10377 if (!bnxt_pf_wq) {
10378 bnxt_pf_wq =
10379 create_singlethread_workqueue("bnxt_pf_wq");
10380 if (!bnxt_pf_wq) {
10381 dev_err(&pdev->dev, "Unable to create workqueue.\n");
10382 goto init_err_pci_clean;
10383 }
10384 }
2ae7408f 10385 bnxt_init_tc(bp);
c213eae8 10386 }
2ae7408f 10387
7809592d
MC
10388 rc = register_netdev(dev);
10389 if (rc)
2ae7408f 10390 goto init_err_cleanup_tc;
7809592d 10391
4ab0c6a8
SP
10392 if (BNXT_PF(bp))
10393 bnxt_dl_register(bp);
10394
c0c050c5
MC
10395 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
10396 board_info[ent->driver_data].name,
10397 (long)pci_resource_start(pdev, 0), dev->dev_addr);
af125b75 10398 pcie_print_link_status(pdev);
90c4f788 10399
c0c050c5
MC
10400 return 0;
10401
2ae7408f
SP
10402init_err_cleanup_tc:
10403 bnxt_shutdown_tc(bp);
7809592d
MC
10404 bnxt_clear_int_mode(bp);
10405
17086399 10406init_err_pci_clean:
a2bf74f4 10407 bnxt_free_hwrm_resources(bp);
98f04cf0
MC
10408 bnxt_free_ctx_mem(bp);
10409 kfree(bp->ctx);
10410 bp->ctx = NULL;
17086399 10411 bnxt_cleanup_pci(bp);
c0c050c5
MC
10412
10413init_err_free:
10414 free_netdev(dev);
10415 return rc;
10416}
10417
d196ece7
MC
10418static void bnxt_shutdown(struct pci_dev *pdev)
10419{
10420 struct net_device *dev = pci_get_drvdata(pdev);
10421 struct bnxt *bp;
10422
10423 if (!dev)
10424 return;
10425
10426 rtnl_lock();
10427 bp = netdev_priv(dev);
10428 if (!bp)
10429 goto shutdown_exit;
10430
10431 if (netif_running(dev))
10432 dev_close(dev);
10433
a7f3f939
RJ
10434 bnxt_ulp_shutdown(bp);
10435
d196ece7
MC
10436 if (system_state == SYSTEM_POWER_OFF) {
10437 bnxt_clear_int_mode(bp);
10438 pci_wake_from_d3(pdev, bp->wol);
10439 pci_set_power_state(pdev, PCI_D3hot);
10440 }
10441
10442shutdown_exit:
10443 rtnl_unlock();
10444}
10445
f65a2044
MC
10446#ifdef CONFIG_PM_SLEEP
10447static int bnxt_suspend(struct device *device)
10448{
10449 struct pci_dev *pdev = to_pci_dev(device);
10450 struct net_device *dev = pci_get_drvdata(pdev);
10451 struct bnxt *bp = netdev_priv(dev);
10452 int rc = 0;
10453
10454 rtnl_lock();
10455 if (netif_running(dev)) {
10456 netif_device_detach(dev);
10457 rc = bnxt_close(dev);
10458 }
10459 bnxt_hwrm_func_drv_unrgtr(bp);
10460 rtnl_unlock();
10461 return rc;
10462}
10463
10464static int bnxt_resume(struct device *device)
10465{
10466 struct pci_dev *pdev = to_pci_dev(device);
10467 struct net_device *dev = pci_get_drvdata(pdev);
10468 struct bnxt *bp = netdev_priv(dev);
10469 int rc = 0;
10470
10471 rtnl_lock();
10472 if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
10473 rc = -ENODEV;
10474 goto resume_exit;
10475 }
10476 rc = bnxt_hwrm_func_reset(bp);
10477 if (rc) {
10478 rc = -EBUSY;
10479 goto resume_exit;
10480 }
10481 bnxt_get_wol_settings(bp);
10482 if (netif_running(dev)) {
10483 rc = bnxt_open(dev);
10484 if (!rc)
10485 netif_device_attach(dev);
10486 }
10487
10488resume_exit:
10489 rtnl_unlock();
10490 return rc;
10491}
10492
10493static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
10494#define BNXT_PM_OPS (&bnxt_pm_ops)
10495
10496#else
10497
10498#define BNXT_PM_OPS NULL
10499
10500#endif /* CONFIG_PM_SLEEP */
10501
6316ea6d
SB
10502/**
10503 * bnxt_io_error_detected - called when PCI error is detected
10504 * @pdev: Pointer to PCI device
10505 * @state: The current pci connection state
10506 *
10507 * This function is called after a PCI bus error affecting
10508 * this device has been detected.
10509 */
10510static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
10511 pci_channel_state_t state)
10512{
10513 struct net_device *netdev = pci_get_drvdata(pdev);
a588e458 10514 struct bnxt *bp = netdev_priv(netdev);
6316ea6d
SB
10515
10516 netdev_info(netdev, "PCI I/O error detected\n");
10517
10518 rtnl_lock();
10519 netif_device_detach(netdev);
10520
a588e458
MC
10521 bnxt_ulp_stop(bp);
10522
6316ea6d
SB
10523 if (state == pci_channel_io_perm_failure) {
10524 rtnl_unlock();
10525 return PCI_ERS_RESULT_DISCONNECT;
10526 }
10527
10528 if (netif_running(netdev))
10529 bnxt_close(netdev);
10530
10531 pci_disable_device(pdev);
10532 rtnl_unlock();
10533
10534	/* Request a slot reset. */
10535 return PCI_ERS_RESULT_NEED_RESET;
10536}
10537
10538/**
10539 * bnxt_io_slot_reset - called after the pci bus has been reset.
10540 * @pdev: Pointer to PCI device
10541 *
10542 * Restart the card from scratch, as if from a cold-boot.
10543 * At this point, the card has experienced a hard reset,
10544 * followed by fixups by BIOS, and has its config space
10545 * set up identically to what it was at cold boot.
10546 */
10547static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
10548{
10549 struct net_device *netdev = pci_get_drvdata(pdev);
10550 struct bnxt *bp = netdev_priv(netdev);
10551 int err = 0;
10552 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
10553
10554 netdev_info(bp->dev, "PCI Slot Reset\n");
10555
10556 rtnl_lock();
10557
10558 if (pci_enable_device(pdev)) {
10559 dev_err(&pdev->dev,
10560 "Cannot re-enable PCI device after reset.\n");
10561 } else {
10562 pci_set_master(pdev);
10563
aa8ed021
MC
10564 err = bnxt_hwrm_func_reset(bp);
10565 if (!err && netif_running(netdev))
6316ea6d
SB
10566 err = bnxt_open(netdev);
10567
a588e458 10568 if (!err) {
6316ea6d 10569 result = PCI_ERS_RESULT_RECOVERED;
a588e458
MC
10570 bnxt_ulp_start(bp);
10571 }
6316ea6d
SB
10572 }
10573
10574 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
10575 dev_close(netdev);
10576
10577 rtnl_unlock();
10578
6316ea6d
SB
10579	return result;
10580}
10581
10582/**
10583 * bnxt_io_resume - called when traffic can start flowing again.
10584 * @pdev: Pointer to PCI device
10585 *
10586 * This callback is called when the error recovery driver tells
10587 * us that it's OK to resume normal operation.
10588 */
10589static void bnxt_io_resume(struct pci_dev *pdev)
10590{
10591 struct net_device *netdev = pci_get_drvdata(pdev);
10592
10593 rtnl_lock();
10594
10595 netif_device_attach(netdev);
10596
10597 rtnl_unlock();
10598}
10599
10600static const struct pci_error_handlers bnxt_err_handler = {
10601 .error_detected = bnxt_io_error_detected,
10602 .slot_reset = bnxt_io_slot_reset,
10603 .resume = bnxt_io_resume
10604};
10605
c0c050c5
MC
10606static struct pci_driver bnxt_pci_driver = {
10607 .name = DRV_MODULE_NAME,
10608 .id_table = bnxt_pci_tbl,
10609 .probe = bnxt_init_one,
10610 .remove = bnxt_remove_one,
d196ece7 10611 .shutdown = bnxt_shutdown,
f65a2044 10612 .driver.pm = BNXT_PM_OPS,
6316ea6d 10613 .err_handler = &bnxt_err_handler,
c0c050c5
MC
10614#if defined(CONFIG_BNXT_SRIOV)
10615 .sriov_configure = bnxt_sriov_configure,
10616#endif
10617};
10618
c213eae8
MC
10619static int __init bnxt_init(void)
10620{
cabfb09d 10621 bnxt_debug_init();
c213eae8
MC
10622 return pci_register_driver(&bnxt_pci_driver);
10623}
10624
10625static void __exit bnxt_exit(void)
10626{
10627 pci_unregister_driver(&bnxt_pci_driver);
10628 if (bnxt_pf_wq)
10629 destroy_workqueue(bnxt_pf_wq);
cabfb09d 10630 bnxt_debug_exit();
c213eae8
MC
10631}
10632
10633module_init(bnxt_init);
10634module_exit(bnxt_exit);