git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git / blame - drivers/net/ethernet/broadcom/bnxt/bnxt.c
bnxt_en: Optimize doorbell write operations for newer chips.
c0c050c5
MC
1/* Broadcom NetXtreme-C/E network driver.
2 *
11f15ed3 3 * Copyright (c) 2014-2016 Broadcom Corporation
bac9a7e0 4 * Copyright (c) 2016-2017 Broadcom Limited
c0c050c5
MC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12
13#include <linux/stringify.h>
14#include <linux/kernel.h>
15#include <linux/timer.h>
16#include <linux/errno.h>
17#include <linux/ioport.h>
18#include <linux/slab.h>
19#include <linux/vmalloc.h>
20#include <linux/interrupt.h>
21#include <linux/pci.h>
22#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/skbuff.h>
25#include <linux/dma-mapping.h>
26#include <linux/bitops.h>
27#include <linux/io.h>
28#include <linux/irq.h>
29#include <linux/delay.h>
30#include <asm/byteorder.h>
31#include <asm/page.h>
32#include <linux/time.h>
33#include <linux/mii.h>
34#include <linux/if.h>
35#include <linux/if_vlan.h>
5ac67d8b 36#include <linux/rtc.h>
c6d30e83 37#include <linux/bpf.h>
c0c050c5
MC
38#include <net/ip.h>
39#include <net/tcp.h>
40#include <net/udp.h>
41#include <net/checksum.h>
42#include <net/ip6_checksum.h>
ad51b8e9 43#include <net/udp_tunnel.h>
c0c050c5
MC
44#include <linux/workqueue.h>
45#include <linux/prefetch.h>
46#include <linux/cache.h>
47#include <linux/log2.h>
48#include <linux/aer.h>
49#include <linux/bitmap.h>
50#include <linux/cpu_rmap.h>
51
52#include "bnxt_hsi.h"
53#include "bnxt.h"
a588e458 54#include "bnxt_ulp.h"
c0c050c5
MC
55#include "bnxt_sriov.h"
56#include "bnxt_ethtool.h"
7df4ae9f 57#include "bnxt_dcb.h"
c6d30e83 58#include "bnxt_xdp.h"
c0c050c5
MC
59
60#define BNXT_TX_TIMEOUT (5 * HZ)
61
62static const char version[] =
63 "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
64
65MODULE_LICENSE("GPL");
66MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
67MODULE_VERSION(DRV_MODULE_VERSION);
68
69#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
70#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
71#define BNXT_RX_COPY_THRESH 256
72
4419dbe6 73#define BNXT_TX_PUSH_THRESH 164
c0c050c5
MC
74
75enum board_idx {
fbc9a523 76 BCM57301,
c0c050c5
MC
77 BCM57302,
78 BCM57304,
1f681688 79 BCM57417_NPAR,
fa853dda 80 BCM58700,
b24eb6ae
MC
81 BCM57311,
82 BCM57312,
fbc9a523 83 BCM57402,
c0c050c5
MC
84 BCM57404,
85 BCM57406,
1f681688
MC
86 BCM57402_NPAR,
87 BCM57407,
b24eb6ae
MC
88 BCM57412,
89 BCM57414,
90 BCM57416,
91 BCM57417,
1f681688 92 BCM57412_NPAR,
5049e33b 93 BCM57314,
1f681688
MC
94 BCM57417_SFP,
95 BCM57416_SFP,
96 BCM57404_NPAR,
97 BCM57406_NPAR,
98 BCM57407_SFP,
adbc8305 99 BCM57407_NPAR,
1f681688
MC
100 BCM57414_NPAR,
101 BCM57416_NPAR,
32b40798
DK
102 BCM57452,
103 BCM57454,
adbc8305
MC
104 NETXTREME_E_VF,
105 NETXTREME_C_VF,
c0c050c5
MC
106};
107
108/* indexed by enum above */
109static const struct {
110 char *name;
111} board_info[] = {
adbc8305
MC
112 { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
113 { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
114 { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
1f681688 115 { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
adbc8305
MC
116 { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
117 { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
118 { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
119 { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
120 { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
121 { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
1f681688 122 { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
adbc8305
MC
123 { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
124 { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
125 { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
126 { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
127 { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
1f681688 128 { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
adbc8305
MC
129 { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
130 { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
131 { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
1f681688
MC
132 { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
133 { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
adbc8305
MC
134 { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
135 { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
1f681688
MC
136 { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
137 { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
32b40798
DK
138 { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
139 { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
adbc8305
MC
140 { "Broadcom NetXtreme-E Ethernet Virtual Function" },
141 { "Broadcom NetXtreme-C Ethernet Virtual Function" },
c0c050c5
MC
142};
143
144static const struct pci_device_id bnxt_pci_tbl[] = {
adbc8305 145 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
fbc9a523 146 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
c0c050c5
MC
147 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
148 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
1f681688 149 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
fa853dda 150 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
b24eb6ae
MC
151 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
152 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
fbc9a523 153 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
c0c050c5
MC
154 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
155 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
1f681688
MC
156 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
157 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
b24eb6ae
MC
158 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
159 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
160 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
161 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
1f681688 162 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
5049e33b 163 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
1f681688
MC
164 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
165 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
166 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
167 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
168 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
adbc8305
MC
169 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
170 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
1f681688 171 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
adbc8305 172 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
1f681688 173 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
adbc8305 174 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
32b40798
DK
175 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
176 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
c0c050c5 177#ifdef CONFIG_BNXT_SRIOV
c7ef35eb
DK
178 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
179 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
adbc8305
MC
180 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
181 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
182 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
183 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
184 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
185 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
c0c050c5
MC
186#endif
187 { 0 }
188};
189
190MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
191
192static const u16 bnxt_vf_req_snif[] = {
193 HWRM_FUNC_CFG,
194 HWRM_PORT_PHY_QCFG,
195 HWRM_CFA_L2_FILTER_ALLOC,
196};
197
25be8623 198static const u16 bnxt_async_events_arr[] = {
87c374de
MC
199 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
200 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
201 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
202 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
203 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
25be8623
MC
204};
205
c0c050c5
MC
206static bool bnxt_vf_pciid(enum board_idx idx)
207{
adbc8305 208 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
c0c050c5
MC
209}
210
211#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
212#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
213#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
214
215#define BNXT_CP_DB_REARM(db, raw_cons) \
216 writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
217
218#define BNXT_CP_DB(db, raw_cons) \
219 writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
220
221#define BNXT_CP_DB_IRQ_DIS(db) \
222 writel(DB_CP_IRQ_DIS_FLAGS, db)
223
38413406 224const u16 bnxt_lhint_arr[] = {
c0c050c5
MC
225 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
226 TX_BD_FLAGS_LHINT_512_TO_1023,
227 TX_BD_FLAGS_LHINT_1024_TO_2047,
228 TX_BD_FLAGS_LHINT_1024_TO_2047,
229 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
230 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
231 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
232 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
233 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
234 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
235 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
236 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
237 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
238 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
239 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
240 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
241 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
242 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
243 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
244};
245
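/* Main transmit entry point.  For a small packet (length <= tx_push_thresh)
 * arriving on an otherwise empty ring, the frame is copied into the push
 * buffer and written directly through the doorbell ("push" mode), avoiding a
 * DMA read by the NIC.  Otherwise the skb head and fragments are DMA mapped
 * into long TX BDs (LSO/checksum flags go in the extended BD) and the TX
 * doorbell is rung.
 */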
246static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
247{
248 struct bnxt *bp = netdev_priv(dev);
249 struct tx_bd *txbd;
250 struct tx_bd_ext *txbd1;
251 struct netdev_queue *txq;
252 int i;
253 dma_addr_t mapping;
254 unsigned int length, pad = 0;
255 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
256 u16 prod, last_frag;
257 struct pci_dev *pdev = bp->pdev;
c0c050c5
MC
258 struct bnxt_tx_ring_info *txr;
259 struct bnxt_sw_tx_bd *tx_buf;
260
261 i = skb_get_queue_mapping(skb);
262 if (unlikely(i >= bp->tx_nr_rings)) {
263 dev_kfree_skb_any(skb);
264 return NETDEV_TX_OK;
265 }
266
c0c050c5 267 txq = netdev_get_tx_queue(dev, i);
a960dec9 268 txr = &bp->tx_ring[bp->tx_ring_map[i]];
c0c050c5
MC
269 prod = txr->tx_prod;
270
271 free_size = bnxt_tx_avail(bp, txr);
272 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
273 netif_tx_stop_queue(txq);
274 return NETDEV_TX_BUSY;
275 }
276
277 length = skb->len;
278 len = skb_headlen(skb);
279 last_frag = skb_shinfo(skb)->nr_frags;
280
281 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
282
283 txbd->tx_bd_opaque = prod;
284
285 tx_buf = &txr->tx_buf_ring[prod];
286 tx_buf->skb = skb;
287 tx_buf->nr_frags = last_frag;
288
289 vlan_tag_flags = 0;
290 cfa_action = 0;
291 if (skb_vlan_tag_present(skb)) {
292 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
293 skb_vlan_tag_get(skb);
294 /* Currently supports 8021Q, 8021AD vlan offloads
295 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
296 */
297 if (skb->vlan_proto == htons(ETH_P_8021Q))
298 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
299 }
300
301 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
4419dbe6
MC
302 struct tx_push_buffer *tx_push_buf = txr->tx_push;
303 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
304 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
305 void *pdata = tx_push_buf->data;
306 u64 *end;
307 int j, push_len;
c0c050c5
MC
308
309 /* Set COAL_NOW to be ready quickly for the next push */
310 tx_push->tx_bd_len_flags_type =
311 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
312 TX_BD_TYPE_LONG_TX_BD |
313 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
314 TX_BD_FLAGS_COAL_NOW |
315 TX_BD_FLAGS_PACKET_END |
316 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
317
318 if (skb->ip_summed == CHECKSUM_PARTIAL)
319 tx_push1->tx_bd_hsize_lflags =
320 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
321 else
322 tx_push1->tx_bd_hsize_lflags = 0;
323
324 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
325 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
326
fbb0fa8b
MC
327 end = pdata + length;
328 end = PTR_ALIGN(end, 8) - 1;
4419dbe6
MC
329 *end = 0;
330
c0c050c5
MC
331 skb_copy_from_linear_data(skb, pdata, len);
332 pdata += len;
333 for (j = 0; j < last_frag; j++) {
334 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
335 void *fptr;
336
337 fptr = skb_frag_address_safe(frag);
338 if (!fptr)
339 goto normal_tx;
340
341 memcpy(pdata, fptr, skb_frag_size(frag));
342 pdata += skb_frag_size(frag);
343 }
344
4419dbe6
MC
345 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
346 txbd->tx_bd_haddr = txr->data_mapping;
c0c050c5
MC
347 prod = NEXT_TX(prod);
348 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
349 memcpy(txbd, tx_push1, sizeof(*txbd));
350 prod = NEXT_TX(prod);
4419dbe6 351 tx_push->doorbell =
c0c050c5
MC
352 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
353 txr->tx_prod = prod;
354
b9a8460a 355 tx_buf->is_push = 1;
c0c050c5 356 netdev_tx_sent_queue(txq, skb->len);
b9a8460a 357 wmb(); /* Sync is_push and byte queue before pushing data */
c0c050c5 358
4419dbe6
MC
359 push_len = (length + sizeof(*tx_push) + 7) / 8;
360 if (push_len > 16) {
361 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
9d13744b
MC
362 __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
363 (push_len - 16) << 1);
4419dbe6
MC
364 } else {
365 __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
366 push_len);
367 }
c0c050c5 368
c0c050c5
MC
369 goto tx_done;
370 }
371
372normal_tx:
373 if (length < BNXT_MIN_PKT_SIZE) {
374 pad = BNXT_MIN_PKT_SIZE - length;
375 if (skb_pad(skb, pad)) {
376 /* SKB already freed. */
377 tx_buf->skb = NULL;
378 return NETDEV_TX_OK;
379 }
380 length = BNXT_MIN_PKT_SIZE;
381 }
382
383 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
384
385 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
386 dev_kfree_skb_any(skb);
387 tx_buf->skb = NULL;
388 return NETDEV_TX_OK;
389 }
390
391 dma_unmap_addr_set(tx_buf, mapping, mapping);
392 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
393 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
394
395 txbd->tx_bd_haddr = cpu_to_le64(mapping);
396
397 prod = NEXT_TX(prod);
398 txbd1 = (struct tx_bd_ext *)
399 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
400
401 txbd1->tx_bd_hsize_lflags = 0;
402 if (skb_is_gso(skb)) {
403 u32 hdr_len;
404
405 if (skb->encapsulation)
406 hdr_len = skb_inner_network_offset(skb) +
407 skb_inner_network_header_len(skb) +
408 inner_tcp_hdrlen(skb);
409 else
410 hdr_len = skb_transport_offset(skb) +
411 tcp_hdrlen(skb);
412
413 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
414 TX_BD_FLAGS_T_IPID |
415 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
416 length = skb_shinfo(skb)->gso_size;
417 txbd1->tx_bd_mss = cpu_to_le32(length);
418 length += hdr_len;
419 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
420 txbd1->tx_bd_hsize_lflags =
421 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
422 txbd1->tx_bd_mss = 0;
423 }
424
425 length >>= 9;
426 flags |= bnxt_lhint_arr[length];
427 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
428
429 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
430 txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
431 for (i = 0; i < last_frag; i++) {
432 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
433
434 prod = NEXT_TX(prod);
435 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
436
437 len = skb_frag_size(frag);
438 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
439 DMA_TO_DEVICE);
440
441 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
442 goto tx_dma_error;
443
444 tx_buf = &txr->tx_buf_ring[prod];
445 dma_unmap_addr_set(tx_buf, mapping, mapping);
446
447 txbd->tx_bd_haddr = cpu_to_le64(mapping);
448
449 flags = len << TX_BD_LEN_SHIFT;
450 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
451 }
452
453 flags &= ~TX_BD_LEN;
454 txbd->tx_bd_len_flags_type =
455 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
456 TX_BD_FLAGS_PACKET_END);
457
458 netdev_tx_sent_queue(txq, skb->len);
459
460 /* Sync BD data before updating doorbell */
461 wmb();
462
463 prod = NEXT_TX(prod);
464 txr->tx_prod = prod;
465
434c975a 466 bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
c0c050c5
MC
467
468tx_done:
469
470 mmiowb();
471
472 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
473 netif_tx_stop_queue(txq);
474
475 /* netif_tx_stop_queue() must be done before checking
476 * tx index in bnxt_tx_avail() below, because in
477 * bnxt_tx_int(), we update tx index before checking for
478 * netif_tx_queue_stopped().
479 */
480 smp_mb();
481 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
482 netif_tx_wake_queue(txq);
483 }
484 return NETDEV_TX_OK;
485
486tx_dma_error:
487 last_frag = i;
488
489 /* start back at beginning and unmap skb */
490 prod = txr->tx_prod;
491 tx_buf = &txr->tx_buf_ring[prod];
492 tx_buf->skb = NULL;
493 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
494 skb_headlen(skb), PCI_DMA_TODEVICE);
495 prod = NEXT_TX(prod);
496
497 /* unmap remaining mapped pages */
498 for (i = 0; i < last_frag; i++) {
499 prod = NEXT_TX(prod);
500 tx_buf = &txr->tx_buf_ring[prod];
501 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
502 skb_frag_size(&skb_shinfo(skb)->frags[i]),
503 PCI_DMA_TODEVICE);
504 }
505
506 dev_kfree_skb_any(skb);
507 return NETDEV_TX_OK;
508}
509
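/* Reclaim nr_pkts completed TX packets: unmap the head and fragment DMA
 * addresses, free the skbs, update BQL accounting and tx_cons, and re-wake
 * the TX queue (under the tx lock) if it was stopped and enough descriptors
 * are now free.
 */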
510static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
511{
b6ab4b01 512 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
a960dec9 513 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
c0c050c5
MC
514 u16 cons = txr->tx_cons;
515 struct pci_dev *pdev = bp->pdev;
516 int i;
517 unsigned int tx_bytes = 0;
518
519 for (i = 0; i < nr_pkts; i++) {
520 struct bnxt_sw_tx_bd *tx_buf;
521 struct sk_buff *skb;
522 int j, last;
523
524 tx_buf = &txr->tx_buf_ring[cons];
525 cons = NEXT_TX(cons);
526 skb = tx_buf->skb;
527 tx_buf->skb = NULL;
528
529 if (tx_buf->is_push) {
530 tx_buf->is_push = 0;
531 goto next_tx_int;
532 }
533
534 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
535 skb_headlen(skb), PCI_DMA_TODEVICE);
536 last = tx_buf->nr_frags;
537
538 for (j = 0; j < last; j++) {
539 cons = NEXT_TX(cons);
540 tx_buf = &txr->tx_buf_ring[cons];
541 dma_unmap_page(
542 &pdev->dev,
543 dma_unmap_addr(tx_buf, mapping),
544 skb_frag_size(&skb_shinfo(skb)->frags[j]),
545 PCI_DMA_TODEVICE);
546 }
547
548next_tx_int:
549 cons = NEXT_TX(cons);
550
551 tx_bytes += skb->len;
552 dev_kfree_skb_any(skb);
553 }
554
555 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
556 txr->tx_cons = cons;
557
558 /* Need to make the tx_cons update visible to bnxt_start_xmit()
559 * before checking for netif_tx_queue_stopped(). Without the
560 * memory barrier, there is a small possibility that bnxt_start_xmit()
561 * will miss it and cause the queue to be stopped forever.
562 */
563 smp_mb();
564
565 if (unlikely(netif_tx_queue_stopped(txq)) &&
566 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
567 __netif_tx_lock(txq, smp_processor_id());
568 if (netif_tx_queue_stopped(txq) &&
569 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
570 txr->dev_state != BNXT_DEV_STATE_CLOSING)
571 netif_tx_wake_queue(txq);
572 __netif_tx_unlock(txq);
573 }
574}
575
c61fb99c
MC
576static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
577 gfp_t gfp)
578{
579 struct device *dev = &bp->pdev->dev;
580 struct page *page;
581
582 page = alloc_page(gfp);
583 if (!page)
584 return NULL;
585
c519fe9a
SN
586 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
587 DMA_ATTR_WEAK_ORDERING);
c61fb99c
MC
588 if (dma_mapping_error(dev, *mapping)) {
589 __free_page(page);
590 return NULL;
591 }
592 *mapping += bp->rx_dma_offset;
593 return page;
594}
595
c0c050c5
MC
596static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
597 gfp_t gfp)
598{
599 u8 *data;
600 struct pci_dev *pdev = bp->pdev;
601
602 data = kmalloc(bp->rx_buf_size, gfp);
603 if (!data)
604 return NULL;
605
c519fe9a
SN
606 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
607 bp->rx_buf_use_size, bp->rx_dir,
608 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
609
610 if (dma_mapping_error(&pdev->dev, *mapping)) {
611 kfree(data);
612 data = NULL;
613 }
614 return data;
615}
616
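/* Allocate a fresh receive buffer (a full page in page mode, otherwise a
 * kmalloc'd data buffer), DMA map it, and install it in the RX descriptor
 * at 'prod'.
 */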
38413406
MC
617int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
618 u16 prod, gfp_t gfp)
c0c050c5
MC
619{
620 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
621 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
c0c050c5
MC
622 dma_addr_t mapping;
623
c61fb99c
MC
624 if (BNXT_RX_PAGE_MODE(bp)) {
625 struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
c0c050c5 626
c61fb99c
MC
627 if (!page)
628 return -ENOMEM;
629
630 rx_buf->data = page;
631 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
632 } else {
633 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
634
635 if (!data)
636 return -ENOMEM;
637
638 rx_buf->data = data;
639 rx_buf->data_ptr = data + bp->rx_offset;
640 }
11cd119d 641 rx_buf->mapping = mapping;
c0c050c5
MC
642
643 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
c0c050c5
MC
644 return 0;
645}
646
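/* Recycle the buffer at 'cons' back into the ring at the current producer
 * index by copying its data pointer and DMA mapping into the producer
 * software slot and BD (used when the packet was copied or a replacement
 * buffer could not be allocated).
 */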
c6d30e83 647void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
c0c050c5
MC
648{
649 u16 prod = rxr->rx_prod;
650 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
651 struct rx_bd *cons_bd, *prod_bd;
652
653 prod_rx_buf = &rxr->rx_buf_ring[prod];
654 cons_rx_buf = &rxr->rx_buf_ring[cons];
655
656 prod_rx_buf->data = data;
6bb19474 657 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
c0c050c5 658
11cd119d 659 prod_rx_buf->mapping = cons_rx_buf->mapping;
c0c050c5
MC
660
661 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
662 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
663
664 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
665}
666
667static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
668{
669 u16 next, max = rxr->rx_agg_bmap_size;
670
671 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
672 if (next >= max)
673 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
674 return next;
675}
676
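/* Allocate and DMA map a page (or a BNXT_RX_PAGE_SIZE slice of a larger
 * page) for the RX aggregation ring, claim a free slot in rx_agg_bmap, and
 * fill in the aggregation BD at 'prod'.
 */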
677static inline int bnxt_alloc_rx_page(struct bnxt *bp,
678 struct bnxt_rx_ring_info *rxr,
679 u16 prod, gfp_t gfp)
680{
681 struct rx_bd *rxbd =
682 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
683 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
684 struct pci_dev *pdev = bp->pdev;
685 struct page *page;
686 dma_addr_t mapping;
687 u16 sw_prod = rxr->rx_sw_agg_prod;
89d0a06c 688 unsigned int offset = 0;
c0c050c5 689
89d0a06c
MC
690 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
691 page = rxr->rx_page;
692 if (!page) {
693 page = alloc_page(gfp);
694 if (!page)
695 return -ENOMEM;
696 rxr->rx_page = page;
697 rxr->rx_page_offset = 0;
698 }
699 offset = rxr->rx_page_offset;
700 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
701 if (rxr->rx_page_offset == PAGE_SIZE)
702 rxr->rx_page = NULL;
703 else
704 get_page(page);
705 } else {
706 page = alloc_page(gfp);
707 if (!page)
708 return -ENOMEM;
709 }
c0c050c5 710
c519fe9a
SN
711 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
712 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
713 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
714 if (dma_mapping_error(&pdev->dev, mapping)) {
715 __free_page(page);
716 return -EIO;
717 }
718
719 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
720 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
721
722 __set_bit(sw_prod, rxr->rx_agg_bmap);
723 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
724 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
725
726 rx_agg_buf->page = page;
89d0a06c 727 rx_agg_buf->offset = offset;
c0c050c5
MC
728 rx_agg_buf->mapping = mapping;
729 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
730 rxbd->rx_bd_opaque = sw_prod;
731 return 0;
732}
733
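/* Return 'agg_bufs' aggregation buffers named by the completion ring entries
 * starting at 'cp_cons' back to the aggregation ring so they can be reused
 * (called when a packet is aborted or dropped).
 */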
734static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
735 u32 agg_bufs)
736{
737 struct bnxt *bp = bnapi->bp;
738 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
b6ab4b01 739 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
740 u16 prod = rxr->rx_agg_prod;
741 u16 sw_prod = rxr->rx_sw_agg_prod;
742 u32 i;
743
744 for (i = 0; i < agg_bufs; i++) {
745 u16 cons;
746 struct rx_agg_cmp *agg;
747 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
748 struct rx_bd *prod_bd;
749 struct page *page;
750
751 agg = (struct rx_agg_cmp *)
752 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
753 cons = agg->rx_agg_cmp_opaque;
754 __clear_bit(cons, rxr->rx_agg_bmap);
755
756 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
757 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
758
759 __set_bit(sw_prod, rxr->rx_agg_bmap);
760 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
761 cons_rx_buf = &rxr->rx_agg_ring[cons];
762
763 /* It is possible for sw_prod to be equal to cons, so
764 * set cons_rx_buf->page to NULL first.
765 */
766 page = cons_rx_buf->page;
767 cons_rx_buf->page = NULL;
768 prod_rx_buf->page = page;
89d0a06c 769 prod_rx_buf->offset = cons_rx_buf->offset;
c0c050c5
MC
770
771 prod_rx_buf->mapping = cons_rx_buf->mapping;
772
773 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
774
775 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
776 prod_bd->rx_bd_opaque = sw_prod;
777
778 prod = NEXT_RX_AGG(prod);
779 sw_prod = NEXT_RX_AGG(sw_prod);
780 cp_cons = NEXT_CMP(cp_cons);
781 }
782 rxr->rx_agg_prod = prod;
783 rxr->rx_sw_agg_prod = sw_prod;
784}
785
c61fb99c
MC
786static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
787 struct bnxt_rx_ring_info *rxr,
788 u16 cons, void *data, u8 *data_ptr,
789 dma_addr_t dma_addr,
790 unsigned int offset_and_len)
791{
792 unsigned int payload = offset_and_len >> 16;
793 unsigned int len = offset_and_len & 0xffff;
794 struct skb_frag_struct *frag;
795 struct page *page = data;
796 u16 prod = rxr->rx_prod;
797 struct sk_buff *skb;
798 int off, err;
799
800 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
801 if (unlikely(err)) {
802 bnxt_reuse_rx_data(rxr, cons, data);
803 return NULL;
804 }
805 dma_addr -= bp->rx_dma_offset;
c519fe9a
SN
806 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
807 DMA_ATTR_WEAK_ORDERING);
c61fb99c
MC
808
809 if (unlikely(!payload))
810 payload = eth_get_headlen(data_ptr, len);
811
812 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
813 if (!skb) {
814 __free_page(page);
815 return NULL;
816 }
817
818 off = (void *)data_ptr - page_address(page);
819 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
820 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
821 payload + NET_IP_ALIGN);
822
823 frag = &skb_shinfo(skb)->frags[0];
824 skb_frag_size_sub(frag, payload);
825 frag->page_offset += payload;
826 skb->data_len -= payload;
827 skb->tail += payload;
828
829 return skb;
830}
831
c0c050c5
MC
832static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
833 struct bnxt_rx_ring_info *rxr, u16 cons,
6bb19474
MC
834 void *data, u8 *data_ptr,
835 dma_addr_t dma_addr,
836 unsigned int offset_and_len)
c0c050c5 837{
6bb19474 838 u16 prod = rxr->rx_prod;
c0c050c5 839 struct sk_buff *skb;
6bb19474 840 int err;
c0c050c5
MC
841
842 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
843 if (unlikely(err)) {
844 bnxt_reuse_rx_data(rxr, cons, data);
845 return NULL;
846 }
847
848 skb = build_skb(data, 0);
c519fe9a
SN
849 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
850 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
851 if (!skb) {
852 kfree(data);
853 return NULL;
854 }
855
b3dba77c 856 skb_reserve(skb, bp->rx_offset);
6bb19474 857 skb_put(skb, offset_and_len & 0xffff);
c0c050c5
MC
858 return skb;
859}
860
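/* Attach 'agg_bufs' aggregation pages described by the completion ring to
 * 'skb' as page fragments, replenishing each aggregation slot as it is
 * consumed.  On allocation failure the skb is freed, the remaining buffers
 * are recycled, and NULL is returned.
 */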
861static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
862 struct sk_buff *skb, u16 cp_cons,
863 u32 agg_bufs)
864{
865 struct pci_dev *pdev = bp->pdev;
866 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
b6ab4b01 867 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
868 u16 prod = rxr->rx_agg_prod;
869 u32 i;
870
871 for (i = 0; i < agg_bufs; i++) {
872 u16 cons, frag_len;
873 struct rx_agg_cmp *agg;
874 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
875 struct page *page;
876 dma_addr_t mapping;
877
878 agg = (struct rx_agg_cmp *)
879 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
880 cons = agg->rx_agg_cmp_opaque;
881 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
882 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
883
884 cons_rx_buf = &rxr->rx_agg_ring[cons];
89d0a06c
MC
885 skb_fill_page_desc(skb, i, cons_rx_buf->page,
886 cons_rx_buf->offset, frag_len);
c0c050c5
MC
887 __clear_bit(cons, rxr->rx_agg_bmap);
888
889 /* It is possible for bnxt_alloc_rx_page() to allocate
890 * a sw_prod index that equals the cons index, so we
891 * need to clear the cons entry now.
892 */
11cd119d 893 mapping = cons_rx_buf->mapping;
c0c050c5
MC
894 page = cons_rx_buf->page;
895 cons_rx_buf->page = NULL;
896
897 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
898 struct skb_shared_info *shinfo;
899 unsigned int nr_frags;
900
901 shinfo = skb_shinfo(skb);
902 nr_frags = --shinfo->nr_frags;
903 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
904
905 dev_kfree_skb(skb);
906
907 cons_rx_buf->page = page;
908
909 /* Update prod since possibly some pages have been
910 * allocated already.
911 */
912 rxr->rx_agg_prod = prod;
913 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
914 return NULL;
915 }
916
c519fe9a
SN
917 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
918 PCI_DMA_FROMDEVICE,
919 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
920
921 skb->data_len += frag_len;
922 skb->len += frag_len;
923 skb->truesize += PAGE_SIZE;
924
925 prod = NEXT_RX_AGG(prod);
926 cp_cons = NEXT_CMP(cp_cons);
927 }
928 rxr->rx_agg_prod = prod;
929 return skb;
930}
931
932static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
933 u8 agg_bufs, u32 *raw_cons)
934{
935 u16 last;
936 struct rx_agg_cmp *agg;
937
938 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
939 last = RING_CMP(*raw_cons);
940 agg = (struct rx_agg_cmp *)
941 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
942 return RX_AGG_CMP_VALID(agg, *raw_cons);
943}
944
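/* Copy-break path for small packets: allocate a new skb and memcpy 'len'
 * bytes out of the receive buffer (syncing the DMA buffer for the CPU around
 * the copy) so the original buffer can stay on the ring.
 */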
945static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
946 unsigned int len,
947 dma_addr_t mapping)
948{
949 struct bnxt *bp = bnapi->bp;
950 struct pci_dev *pdev = bp->pdev;
951 struct sk_buff *skb;
952
953 skb = napi_alloc_skb(&bnapi->napi, len);
954 if (!skb)
955 return NULL;
956
745fc05c
MC
957 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
958 bp->rx_dir);
c0c050c5 959
6bb19474
MC
960 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
961 len + NET_IP_ALIGN);
c0c050c5 962
745fc05c
MC
963 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
964 bp->rx_dir);
c0c050c5
MC
965
966 skb_put(skb, len);
967 return skb;
968}
969
fa7e2812
MC
970static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
971 u32 *raw_cons, void *cmp)
972{
973 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
974 struct rx_cmp *rxcmp = cmp;
975 u32 tmp_raw_cons = *raw_cons;
976 u8 cmp_type, agg_bufs = 0;
977
978 cmp_type = RX_CMP_TYPE(rxcmp);
979
980 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
981 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
982 RX_CMP_AGG_BUFS) >>
983 RX_CMP_AGG_BUFS_SHIFT;
984 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
985 struct rx_tpa_end_cmp *tpa_end = cmp;
986
987 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
988 RX_TPA_END_CMP_AGG_BUFS) >>
989 RX_TPA_END_CMP_AGG_BUFS_SHIFT;
990 }
991
992 if (agg_bufs) {
993 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
994 return -EBUSY;
995 }
996 *raw_cons = tmp_raw_cons;
997 return 0;
998}
999
1000static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1001{
1002 if (!rxr->bnapi->in_reset) {
1003 rxr->bnapi->in_reset = true;
1004 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1005 schedule_work(&bp->sp_task);
1006 }
1007 rxr->rx_next_cons = 0xffff;
1008}
1009
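/* Begin a TPA (hardware GRO/LRO) aggregation: stash the starting buffer and
 * its metadata (length, RSS hash, gso_type, header offsets) in
 * rxr->rx_tpa[agg_id] and give the ring a replacement buffer.
 */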
c0c050c5
MC
1010static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1011 struct rx_tpa_start_cmp *tpa_start,
1012 struct rx_tpa_start_cmp_ext *tpa_start1)
1013{
1014 u8 agg_id = TPA_START_AGG_ID(tpa_start);
1015 u16 cons, prod;
1016 struct bnxt_tpa_info *tpa_info;
1017 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1018 struct rx_bd *prod_bd;
1019 dma_addr_t mapping;
1020
1021 cons = tpa_start->rx_tpa_start_cmp_opaque;
1022 prod = rxr->rx_prod;
1023 cons_rx_buf = &rxr->rx_buf_ring[cons];
1024 prod_rx_buf = &rxr->rx_buf_ring[prod];
1025 tpa_info = &rxr->rx_tpa[agg_id];
1026
fa7e2812
MC
1027 if (unlikely(cons != rxr->rx_next_cons)) {
1028 bnxt_sched_reset(bp, rxr);
1029 return;
1030 }
1031
c0c050c5 1032 prod_rx_buf->data = tpa_info->data;
6bb19474 1033 prod_rx_buf->data_ptr = tpa_info->data_ptr;
c0c050c5
MC
1034
1035 mapping = tpa_info->mapping;
11cd119d 1036 prod_rx_buf->mapping = mapping;
c0c050c5
MC
1037
1038 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1039
1040 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1041
1042 tpa_info->data = cons_rx_buf->data;
6bb19474 1043 tpa_info->data_ptr = cons_rx_buf->data_ptr;
c0c050c5 1044 cons_rx_buf->data = NULL;
11cd119d 1045 tpa_info->mapping = cons_rx_buf->mapping;
c0c050c5
MC
1046
1047 tpa_info->len =
1048 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1049 RX_TPA_START_CMP_LEN_SHIFT;
1050 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1051 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1052
1053 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1054 tpa_info->gso_type = SKB_GSO_TCPV4;
1055 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1056 if (hash_type == 3)
1057 tpa_info->gso_type = SKB_GSO_TCPV6;
1058 tpa_info->rss_hash =
1059 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1060 } else {
1061 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1062 tpa_info->gso_type = 0;
1063 if (netif_msg_rx_err(bp))
1064 netdev_warn(bp->dev, "TPA packet without valid hash\n");
1065 }
1066 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1067 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
94758f8d 1068 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
c0c050c5
MC
1069
1070 rxr->rx_prod = NEXT_RX(prod);
1071 cons = NEXT_RX(cons);
376a5b86 1072 rxr->rx_next_cons = NEXT_RX(cons);
c0c050c5
MC
1073 cons_rx_buf = &rxr->rx_buf_ring[cons];
1074
1075 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1076 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1077 cons_rx_buf->data = NULL;
1078}
1079
1080static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
1081 u16 cp_cons, u32 agg_bufs)
1082{
1083 if (agg_bufs)
1084 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1085}
1086
94758f8d
MC
1087static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1088 int payload_off, int tcp_ts,
1089 struct sk_buff *skb)
1090{
1091#ifdef CONFIG_INET
1092 struct tcphdr *th;
1093 int len, nw_off;
1094 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1095 u32 hdr_info = tpa_info->hdr_info;
1096 bool loopback = false;
1097
1098 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1099 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1100 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1101
1102 /* If the packet is an internal loopback packet, the offsets will
1103 * have an extra 4 bytes.
1104 */
1105 if (inner_mac_off == 4) {
1106 loopback = true;
1107 } else if (inner_mac_off > 4) {
1108 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1109 ETH_HLEN - 2));
1110
 1111 /* We only support inner IPv4/IPv6. If we don't see the
1112 * correct protocol ID, it must be a loopback packet where
1113 * the offsets are off by 4.
1114 */
09a7636a 1115 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
94758f8d
MC
1116 loopback = true;
1117 }
1118 if (loopback) {
1119 /* internal loopback packet, subtract all offsets by 4 */
1120 inner_ip_off -= 4;
1121 inner_mac_off -= 4;
1122 outer_ip_off -= 4;
1123 }
1124
1125 nw_off = inner_ip_off - ETH_HLEN;
1126 skb_set_network_header(skb, nw_off);
1127 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1128 struct ipv6hdr *iph = ipv6_hdr(skb);
1129
1130 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1131 len = skb->len - skb_transport_offset(skb);
1132 th = tcp_hdr(skb);
1133 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1134 } else {
1135 struct iphdr *iph = ip_hdr(skb);
1136
1137 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1138 len = skb->len - skb_transport_offset(skb);
1139 th = tcp_hdr(skb);
1140 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1141 }
1142
1143 if (inner_mac_off) { /* tunnel */
1144 struct udphdr *uh = NULL;
1145 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1146 ETH_HLEN - 2));
1147
1148 if (proto == htons(ETH_P_IP)) {
1149 struct iphdr *iph = (struct iphdr *)skb->data;
1150
1151 if (iph->protocol == IPPROTO_UDP)
1152 uh = (struct udphdr *)(iph + 1);
1153 } else {
1154 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1155
1156 if (iph->nexthdr == IPPROTO_UDP)
1157 uh = (struct udphdr *)(iph + 1);
1158 }
1159 if (uh) {
1160 if (uh->check)
1161 skb_shinfo(skb)->gso_type |=
1162 SKB_GSO_UDP_TUNNEL_CSUM;
1163 else
1164 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1165 }
1166 }
1167#endif
1168 return skb;
1169}
1170
c0c050c5
MC
1171#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1172#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1173
309369c9
MC
1174static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1175 int payload_off, int tcp_ts,
c0c050c5
MC
1176 struct sk_buff *skb)
1177{
d1611c3a 1178#ifdef CONFIG_INET
c0c050c5 1179 struct tcphdr *th;
719ca811 1180 int len, nw_off, tcp_opt_len = 0;
27e24189 1181
309369c9 1182 if (tcp_ts)
c0c050c5
MC
1183 tcp_opt_len = 12;
1184
c0c050c5
MC
1185 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1186 struct iphdr *iph;
1187
1188 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1189 ETH_HLEN;
1190 skb_set_network_header(skb, nw_off);
1191 iph = ip_hdr(skb);
1192 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1193 len = skb->len - skb_transport_offset(skb);
1194 th = tcp_hdr(skb);
1195 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1196 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1197 struct ipv6hdr *iph;
1198
1199 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1200 ETH_HLEN;
1201 skb_set_network_header(skb, nw_off);
1202 iph = ipv6_hdr(skb);
1203 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1204 len = skb->len - skb_transport_offset(skb);
1205 th = tcp_hdr(skb);
1206 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1207 } else {
1208 dev_kfree_skb_any(skb);
1209 return NULL;
1210 }
c0c050c5
MC
1211
1212 if (nw_off) { /* tunnel */
1213 struct udphdr *uh = NULL;
1214
1215 if (skb->protocol == htons(ETH_P_IP)) {
1216 struct iphdr *iph = (struct iphdr *)skb->data;
1217
1218 if (iph->protocol == IPPROTO_UDP)
1219 uh = (struct udphdr *)(iph + 1);
1220 } else {
1221 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1222
1223 if (iph->nexthdr == IPPROTO_UDP)
1224 uh = (struct udphdr *)(iph + 1);
1225 }
1226 if (uh) {
1227 if (uh->check)
1228 skb_shinfo(skb)->gso_type |=
1229 SKB_GSO_UDP_TUNNEL_CSUM;
1230 else
1231 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1232 }
1233 }
1234#endif
1235 return skb;
1236}
1237
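/* Finish GRO for a completed TPA aggregation: set the segment count and
 * gso_size/gso_type, let the chip-specific bp->gro_func() rebuild the
 * TCP/IP headers, then call tcp_gro_complete().
 */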
309369c9
MC
1238static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1239 struct bnxt_tpa_info *tpa_info,
1240 struct rx_tpa_end_cmp *tpa_end,
1241 struct rx_tpa_end_cmp_ext *tpa_end1,
1242 struct sk_buff *skb)
1243{
1244#ifdef CONFIG_INET
1245 int payload_off;
1246 u16 segs;
1247
1248 segs = TPA_END_TPA_SEGS(tpa_end);
1249 if (segs == 1)
1250 return skb;
1251
1252 NAPI_GRO_CB(skb)->count = segs;
1253 skb_shinfo(skb)->gso_size =
1254 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1255 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1256 payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1257 RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
1258 RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
1259 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
5910906c
MC
1260 if (likely(skb))
1261 tcp_gro_complete(skb);
309369c9
MC
1262#endif
1263 return skb;
1264}
1265
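/* Complete a TPA aggregation: build or copy the head skb from the buffer
 * saved at TPA_START, attach any aggregation pages, restore VLAN, RSS hash
 * and checksum metadata, and run GRO completion if requested by the
 * hardware.
 */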
c0c050c5
MC
1266static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1267 struct bnxt_napi *bnapi,
1268 u32 *raw_cons,
1269 struct rx_tpa_end_cmp *tpa_end,
1270 struct rx_tpa_end_cmp_ext *tpa_end1,
4e5dbbda 1271 u8 *event)
c0c050c5
MC
1272{
1273 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
b6ab4b01 1274 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 1275 u8 agg_id = TPA_END_AGG_ID(tpa_end);
6bb19474 1276 u8 *data_ptr, agg_bufs;
c0c050c5
MC
1277 u16 cp_cons = RING_CMP(*raw_cons);
1278 unsigned int len;
1279 struct bnxt_tpa_info *tpa_info;
1280 dma_addr_t mapping;
1281 struct sk_buff *skb;
6bb19474 1282 void *data;
c0c050c5 1283
fa7e2812
MC
1284 if (unlikely(bnapi->in_reset)) {
1285 int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
1286
1287 if (rc < 0)
1288 return ERR_PTR(-EBUSY);
1289 return NULL;
1290 }
1291
c0c050c5
MC
1292 tpa_info = &rxr->rx_tpa[agg_id];
1293 data = tpa_info->data;
6bb19474
MC
1294 data_ptr = tpa_info->data_ptr;
1295 prefetch(data_ptr);
c0c050c5
MC
1296 len = tpa_info->len;
1297 mapping = tpa_info->mapping;
1298
1299 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1300 RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1301
1302 if (agg_bufs) {
1303 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1304 return ERR_PTR(-EBUSY);
1305
4e5dbbda 1306 *event |= BNXT_AGG_EVENT;
c0c050c5
MC
1307 cp_cons = NEXT_CMP(cp_cons);
1308 }
1309
1310 if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
1311 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1312 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1313 agg_bufs, (int)MAX_SKB_FRAGS);
1314 return NULL;
1315 }
1316
1317 if (len <= bp->rx_copy_thresh) {
6bb19474 1318 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
c0c050c5
MC
1319 if (!skb) {
1320 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1321 return NULL;
1322 }
1323 } else {
1324 u8 *new_data;
1325 dma_addr_t new_mapping;
1326
1327 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1328 if (!new_data) {
1329 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1330 return NULL;
1331 }
1332
1333 tpa_info->data = new_data;
b3dba77c 1334 tpa_info->data_ptr = new_data + bp->rx_offset;
c0c050c5
MC
1335 tpa_info->mapping = new_mapping;
1336
1337 skb = build_skb(data, 0);
c519fe9a
SN
1338 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1339 bp->rx_buf_use_size, bp->rx_dir,
1340 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1341
1342 if (!skb) {
1343 kfree(data);
1344 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1345 return NULL;
1346 }
b3dba77c 1347 skb_reserve(skb, bp->rx_offset);
c0c050c5
MC
1348 skb_put(skb, len);
1349 }
1350
1351 if (agg_bufs) {
1352 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1353 if (!skb) {
1354 /* Page reuse already handled by bnxt_rx_pages(). */
1355 return NULL;
1356 }
1357 }
1358 skb->protocol = eth_type_trans(skb, bp->dev);
1359
1360 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1361 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1362
8852ddb4
MC
1363 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1364 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
c0c050c5
MC
1365 u16 vlan_proto = tpa_info->metadata >>
1366 RX_CMP_FLAGS2_METADATA_TPID_SFT;
8852ddb4 1367 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
c0c050c5 1368
8852ddb4 1369 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
c0c050c5
MC
1370 }
1371
1372 skb_checksum_none_assert(skb);
1373 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1374 skb->ip_summed = CHECKSUM_UNNECESSARY;
1375 skb->csum_level =
1376 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1377 }
1378
1379 if (TPA_END_GRO(tpa_end))
309369c9 1380 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
c0c050c5
MC
1381
1382 return skb;
1383}
1384
1385/* returns the following:
1386 * 1 - 1 packet successfully received
1387 * 0 - successful TPA_START, packet not completed yet
1388 * -EBUSY - completion ring does not have all the agg buffers yet
1389 * -ENOMEM - packet aborted due to out of memory
1390 * -EIO - packet aborted due to hw error indicated in BD
1391 */
1392static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
4e5dbbda 1393 u8 *event)
c0c050c5
MC
1394{
1395 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
b6ab4b01 1396 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5
MC
1397 struct net_device *dev = bp->dev;
1398 struct rx_cmp *rxcmp;
1399 struct rx_cmp_ext *rxcmp1;
1400 u32 tmp_raw_cons = *raw_cons;
1401 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1402 struct bnxt_sw_rx_bd *rx_buf;
1403 unsigned int len;
6bb19474 1404 u8 *data_ptr, agg_bufs, cmp_type;
c0c050c5
MC
1405 dma_addr_t dma_addr;
1406 struct sk_buff *skb;
6bb19474 1407 void *data;
c0c050c5 1408 int rc = 0;
c61fb99c 1409 u32 misc;
c0c050c5
MC
1410
1411 rxcmp = (struct rx_cmp *)
1412 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1413
1414 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1415 cp_cons = RING_CMP(tmp_raw_cons);
1416 rxcmp1 = (struct rx_cmp_ext *)
1417 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1418
1419 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1420 return -EBUSY;
1421
1422 cmp_type = RX_CMP_TYPE(rxcmp);
1423
1424 prod = rxr->rx_prod;
1425
1426 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1427 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1428 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1429
4e5dbbda 1430 *event |= BNXT_RX_EVENT;
c0c050c5
MC
1431 goto next_rx_no_prod;
1432
1433 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1434 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1435 (struct rx_tpa_end_cmp *)rxcmp,
4e5dbbda 1436 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
c0c050c5
MC
1437
1438 if (unlikely(IS_ERR(skb)))
1439 return -EBUSY;
1440
1441 rc = -ENOMEM;
1442 if (likely(skb)) {
1443 skb_record_rx_queue(skb, bnapi->index);
b356a2e7 1444 napi_gro_receive(&bnapi->napi, skb);
c0c050c5
MC
1445 rc = 1;
1446 }
4e5dbbda 1447 *event |= BNXT_RX_EVENT;
c0c050c5
MC
1448 goto next_rx_no_prod;
1449 }
1450
1451 cons = rxcmp->rx_cmp_opaque;
1452 rx_buf = &rxr->rx_buf_ring[cons];
1453 data = rx_buf->data;
6bb19474 1454 data_ptr = rx_buf->data_ptr;
fa7e2812
MC
1455 if (unlikely(cons != rxr->rx_next_cons)) {
1456 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1457
1458 bnxt_sched_reset(bp, rxr);
1459 return rc1;
1460 }
6bb19474 1461 prefetch(data_ptr);
c0c050c5 1462
c61fb99c
MC
1463 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1464 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
c0c050c5
MC
1465
1466 if (agg_bufs) {
1467 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1468 return -EBUSY;
1469
1470 cp_cons = NEXT_CMP(cp_cons);
4e5dbbda 1471 *event |= BNXT_AGG_EVENT;
c0c050c5 1472 }
4e5dbbda 1473 *event |= BNXT_RX_EVENT;
c0c050c5
MC
1474
1475 rx_buf->data = NULL;
1476 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1477 bnxt_reuse_rx_data(rxr, cons, data);
1478 if (agg_bufs)
1479 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1480
1481 rc = -EIO;
1482 goto next_rx;
1483 }
1484
1485 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
11cd119d 1486 dma_addr = rx_buf->mapping;
c0c050c5 1487
c6d30e83
MC
1488 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1489 rc = 1;
1490 goto next_rx;
1491 }
1492
c0c050c5 1493 if (len <= bp->rx_copy_thresh) {
6bb19474 1494 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
c0c050c5
MC
1495 bnxt_reuse_rx_data(rxr, cons, data);
1496 if (!skb) {
1497 rc = -ENOMEM;
1498 goto next_rx;
1499 }
1500 } else {
c61fb99c
MC
1501 u32 payload;
1502
c6d30e83
MC
1503 if (rx_buf->data_ptr == data_ptr)
1504 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1505 else
1506 payload = 0;
6bb19474 1507 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
c61fb99c 1508 payload | len);
c0c050c5
MC
1509 if (!skb) {
1510 rc = -ENOMEM;
1511 goto next_rx;
1512 }
1513 }
1514
1515 if (agg_bufs) {
1516 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1517 if (!skb) {
1518 rc = -ENOMEM;
1519 goto next_rx;
1520 }
1521 }
1522
1523 if (RX_CMP_HASH_VALID(rxcmp)) {
1524 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1525 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1526
1527 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1528 if (hash_type != 1 && hash_type != 3)
1529 type = PKT_HASH_TYPE_L3;
1530 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1531 }
1532
1533 skb->protocol = eth_type_trans(skb, dev);
1534
8852ddb4
MC
1535 if ((rxcmp1->rx_cmp_flags2 &
1536 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1537 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
c0c050c5 1538 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
8852ddb4 1539 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
c0c050c5
MC
1540 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1541
8852ddb4 1542 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
c0c050c5
MC
1543 }
1544
1545 skb_checksum_none_assert(skb);
1546 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1547 if (dev->features & NETIF_F_RXCSUM) {
1548 skb->ip_summed = CHECKSUM_UNNECESSARY;
1549 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1550 }
1551 } else {
665e350d
SB
1552 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1553 if (dev->features & NETIF_F_RXCSUM)
1554 cpr->rx_l4_csum_errors++;
1555 }
c0c050c5
MC
1556 }
1557
1558 skb_record_rx_queue(skb, bnapi->index);
b356a2e7 1559 napi_gro_receive(&bnapi->napi, skb);
c0c050c5
MC
1560 rc = 1;
1561
1562next_rx:
1563 rxr->rx_prod = NEXT_RX(prod);
376a5b86 1564 rxr->rx_next_cons = NEXT_RX(cons);
c0c050c5
MC
1565
1566next_rx_no_prod:
1567 *raw_cons = tmp_raw_cons;
1568
1569 return rc;
1570}
1571
4bb13abf 1572#define BNXT_GET_EVENT_PORT(data) \
87c374de
MC
1573 ((data) & \
1574 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
4bb13abf 1575
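/* Handle a firmware asynchronous event completion (link change, port module
 * event, VF config change, PF driver unload): set the matching sp_event bit
 * and schedule the slow-path task.
 */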
c0c050c5
MC
1576static int bnxt_async_event_process(struct bnxt *bp,
1577 struct hwrm_async_event_cmpl *cmpl)
1578{
1579 u16 event_id = le16_to_cpu(cmpl->event_id);
1580
1581 /* TODO CHIMP_FW: Define event id's for link change, error etc */
1582 switch (event_id) {
87c374de 1583 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
8cbde117
MC
1584 u32 data1 = le32_to_cpu(cmpl->event_data1);
1585 struct bnxt_link_info *link_info = &bp->link_info;
1586
1587 if (BNXT_VF(bp))
1588 goto async_event_process_exit;
1589 if (data1 & 0x20000) {
1590 u16 fw_speed = link_info->force_link_speed;
1591 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1592
1593 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1594 speed);
1595 }
286ef9d6 1596 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
8cbde117
MC
1597 /* fall thru */
1598 }
87c374de 1599 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
c0c050c5 1600 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
19241368 1601 break;
87c374de 1602 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
19241368 1603 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
c0c050c5 1604 break;
87c374de 1605 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
4bb13abf
MC
1606 u32 data1 = le32_to_cpu(cmpl->event_data1);
1607 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1608
1609 if (BNXT_VF(bp))
1610 break;
1611
1612 if (bp->pf.port_id != port_id)
1613 break;
1614
4bb13abf
MC
1615 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1616 break;
1617 }
87c374de 1618 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
fc0f1929
MC
1619 if (BNXT_PF(bp))
1620 goto async_event_process_exit;
1621 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1622 break;
c0c050c5 1623 default:
19241368 1624 goto async_event_process_exit;
c0c050c5 1625 }
19241368
JH
1626 schedule_work(&bp->sp_task);
1627async_event_process_exit:
a588e458 1628 bnxt_ulp_async_events(bp, cmpl);
c0c050c5
MC
1629 return 0;
1630}
1631
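/* Dispatch HWRM completions seen on the completion ring: acknowledge
 * HWRM_DONE by sequence id, queue forwarded VF requests for the PF
 * slow-path task, and process async event completions.
 */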
1632static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1633{
1634 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1635 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1636 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1637 (struct hwrm_fwd_req_cmpl *)txcmp;
1638
1639 switch (cmpl_type) {
1640 case CMPL_BASE_TYPE_HWRM_DONE:
1641 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1642 if (seq_id == bp->hwrm_intr_seq_id)
1643 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1644 else
1645 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1646 break;
1647
1648 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1649 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1650
1651 if ((vf_id < bp->pf.first_vf_id) ||
1652 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1653 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1654 vf_id);
1655 return -EINVAL;
1656 }
1657
1658 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1659 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1660 schedule_work(&bp->sp_task);
1661 break;
1662
1663 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1664 bnxt_async_event_process(bp,
1665 (struct hwrm_async_event_cmpl *)txcmp);
1666
1667 default:
1668 break;
1669 }
1670
1671 return 0;
1672}
1673
1674static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1675{
1676 struct bnxt_napi *bnapi = dev_instance;
1677 struct bnxt *bp = bnapi->bp;
1678 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1679 u32 cons = RING_CMP(cpr->cp_raw_cons);
1680
1681 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1682 napi_schedule(&bnapi->napi);
1683 return IRQ_HANDLED;
1684}
1685
1686static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1687{
1688 u32 raw_cons = cpr->cp_raw_cons;
1689 u16 cons = RING_CMP(raw_cons);
1690 struct tx_cmp *txcmp;
1691
1692 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1693
1694 return TX_CMP_VALID(txcmp, raw_cons);
1695}
1696
c0c050c5
MC
1697static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1698{
1699 struct bnxt_napi *bnapi = dev_instance;
1700 struct bnxt *bp = bnapi->bp;
1701 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1702 u32 cons = RING_CMP(cpr->cp_raw_cons);
1703 u32 int_status;
1704
1705 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1706
1707 if (!bnxt_has_work(bp, cpr)) {
11809490 1708 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
c0c050c5
MC
1709 /* return if erroneous interrupt */
1710 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1711 return IRQ_NONE;
1712 }
1713
1714 /* disable ring IRQ */
1715 BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1716
1717 /* Return here if interrupt is shared and is disabled. */
1718 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1719 return IRQ_HANDLED;
1720
1721 napi_schedule(&bnapi->napi);
1722 return IRQ_HANDLED;
1723}
1724
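/* Core NAPI poll loop: walk the completion ring, handling TX completions,
 * RX packets and HWRM events until the budget is exhausted, then ACK the
 * completion ring doorbell, run TX reclaim, and ring the RX/aggregation
 * doorbells for any newly posted buffers.
 */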
1725static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1726{
1727 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1728 u32 raw_cons = cpr->cp_raw_cons;
1729 u32 cons;
1730 int tx_pkts = 0;
1731 int rx_pkts = 0;
4e5dbbda 1732 u8 event = 0;
c0c050c5
MC
1733 struct tx_cmp *txcmp;
1734
1735 while (1) {
1736 int rc;
1737
1738 cons = RING_CMP(raw_cons);
1739 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1740
1741 if (!TX_CMP_VALID(txcmp, raw_cons))
1742 break;
1743
67a95e20
MC
1744 /* The valid test of the entry must be done first before
1745 * reading any further.
1746 */
b67daab0 1747 dma_rmb();
c0c050c5
MC
1748 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1749 tx_pkts++;
1750 /* return full budget so NAPI will complete. */
1751 if (unlikely(tx_pkts > bp->tx_wake_thresh))
1752 rx_pkts = budget;
1753 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
4e5dbbda 1754 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
c0c050c5
MC
1755 if (likely(rc >= 0))
1756 rx_pkts += rc;
1757 else if (rc == -EBUSY) /* partial completion */
1758 break;
c0c050c5
MC
1759 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1760 CMPL_BASE_TYPE_HWRM_DONE) ||
1761 (TX_CMP_TYPE(txcmp) ==
1762 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1763 (TX_CMP_TYPE(txcmp) ==
1764 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1765 bnxt_hwrm_handler(bp, txcmp);
1766 }
1767 raw_cons = NEXT_RAW_CMP(raw_cons);
1768
1769 if (rx_pkts == budget)
1770 break;
1771 }
1772
38413406
MC
1773 if (event & BNXT_TX_EVENT) {
1774 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
1775 void __iomem *db = txr->tx_doorbell;
1776 u16 prod = txr->tx_prod;
1777
1778 /* Sync BD data before updating doorbell */
1779 wmb();
1780
434c975a 1781 bnxt_db_write(bp, db, DB_KEY_TX | prod);
38413406
MC
1782 }
1783
c0c050c5
MC
1784 cpr->cp_raw_cons = raw_cons;
1785 /* ACK completion ring before freeing tx ring and producing new
1786 * buffers in rx/agg rings to prevent overflowing the completion
1787 * ring.
1788 */
1789 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1790
1791 if (tx_pkts)
fa3e93e8 1792 bnapi->tx_int(bp, bnapi, tx_pkts);
c0c050c5 1793
4e5dbbda 1794 if (event & BNXT_RX_EVENT) {
b6ab4b01 1795 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
c0c050c5 1796
434c975a
MC
1797 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
1798 if (event & BNXT_AGG_EVENT)
1799 bnxt_db_write(bp, rxr->rx_agg_doorbell,
1800 DB_KEY_RX | rxr->rx_agg_prod);
c0c050c5
MC
1801 }
1802 return rx_pkts;
1803}
1804
10bbdaf5
PS
1805static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1806{
1807 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1808 struct bnxt *bp = bnapi->bp;
1809 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1810 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1811 struct tx_cmp *txcmp;
1812 struct rx_cmp_ext *rxcmp1;
1813 u32 cp_cons, tmp_raw_cons;
1814 u32 raw_cons = cpr->cp_raw_cons;
1815 u32 rx_pkts = 0;
4e5dbbda 1816 u8 event = 0;
10bbdaf5
PS
1817
1818 while (1) {
1819 int rc;
1820
1821 cp_cons = RING_CMP(raw_cons);
1822 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1823
1824 if (!TX_CMP_VALID(txcmp, raw_cons))
1825 break;
1826
1827 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1828 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
1829 cp_cons = RING_CMP(tmp_raw_cons);
1830 rxcmp1 = (struct rx_cmp_ext *)
1831 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1832
1833 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1834 break;
1835
1836 /* force an error to recycle the buffer */
1837 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1838 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1839
4e5dbbda 1840 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
10bbdaf5
PS
1841 if (likely(rc == -EIO))
1842 rx_pkts++;
1843 else if (rc == -EBUSY) /* partial completion */
1844 break;
1845 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
1846 CMPL_BASE_TYPE_HWRM_DONE)) {
1847 bnxt_hwrm_handler(bp, txcmp);
1848 } else {
1849 netdev_err(bp->dev,
1850 "Invalid completion received on special ring\n");
1851 }
1852 raw_cons = NEXT_RAW_CMP(raw_cons);
1853
1854 if (rx_pkts == budget)
1855 break;
1856 }
1857
1858 cpr->cp_raw_cons = raw_cons;
1859 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
434c975a 1860 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
10bbdaf5 1861
434c975a
MC
1862 if (event & BNXT_AGG_EVENT)
1863 bnxt_db_write(bp, rxr->rx_agg_doorbell,
1864 DB_KEY_RX | rxr->rx_agg_prod);
10bbdaf5
PS
1865
1866 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
6ad20165 1867 napi_complete_done(napi, rx_pkts);
10bbdaf5
PS
1868 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1869 }
1870 return rx_pkts;
1871}
1872
c0c050c5
MC
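/* Standard NAPI poll handler: keep calling bnxt_poll_work() until either
 * the budget is exhausted or the completion ring is empty, then complete
 * NAPI and re-arm the completion ring interrupt.
 */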
1873static int bnxt_poll(struct napi_struct *napi, int budget)
1874{
1875 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1876 struct bnxt *bp = bnapi->bp;
1877 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1878 int work_done = 0;
1879
c0c050c5
MC
1880 while (1) {
1881 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1882
1883 if (work_done >= budget)
1884 break;
1885
1886 if (!bnxt_has_work(bp, cpr)) {
e7b95691
MC
1887 if (napi_complete_done(napi, work_done))
1888 BNXT_CP_DB_REARM(cpr->cp_doorbell,
1889 cpr->cp_raw_cons);
c0c050c5
MC
1890 break;
1891 }
1892 }
1893 mmiowb();
c0c050c5
MC
1894 return work_done;
1895}
1896
c0c050c5
MC
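/* Release any skbs still queued on the TX rings, unmapping the head and
 * fragment DMA mappings.  Buffers sent via the push path have no DMA
 * mapping and are simply freed.
 */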
1897static void bnxt_free_tx_skbs(struct bnxt *bp)
1898{
1899 int i, max_idx;
1900 struct pci_dev *pdev = bp->pdev;
1901
b6ab4b01 1902 if (!bp->tx_ring)
c0c050c5
MC
1903 return;
1904
1905 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1906 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 1907 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
1908 int j;
1909
c0c050c5
MC
1910 for (j = 0; j < max_idx;) {
1911 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1912 struct sk_buff *skb = tx_buf->skb;
1913 int k, last;
1914
1915 if (!skb) {
1916 j++;
1917 continue;
1918 }
1919
1920 tx_buf->skb = NULL;
1921
1922 if (tx_buf->is_push) {
1923 dev_kfree_skb(skb);
1924 j += 2;
1925 continue;
1926 }
1927
1928 dma_unmap_single(&pdev->dev,
1929 dma_unmap_addr(tx_buf, mapping),
1930 skb_headlen(skb),
1931 PCI_DMA_TODEVICE);
1932
1933 last = tx_buf->nr_frags;
1934 j += 2;
d612a579
MC
1935 for (k = 0; k < last; k++, j++) {
1936 int ring_idx = j & bp->tx_ring_mask;
c0c050c5
MC
1937 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1938
d612a579 1939 tx_buf = &txr->tx_buf_ring[ring_idx];
c0c050c5
MC
1940 dma_unmap_page(
1941 &pdev->dev,
1942 dma_unmap_addr(tx_buf, mapping),
1943 skb_frag_size(frag), PCI_DMA_TODEVICE);
1944 }
1945 dev_kfree_skb(skb);
1946 }
1947 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1948 }
1949}
1950
1951static void bnxt_free_rx_skbs(struct bnxt *bp)
1952{
1953 int i, max_idx, max_agg_idx;
1954 struct pci_dev *pdev = bp->pdev;
1955
b6ab4b01 1956 if (!bp->rx_ring)
c0c050c5
MC
1957 return;
1958
1959 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
1960 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
1961 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 1962 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
1963 int j;
1964
c0c050c5
MC
1965 if (rxr->rx_tpa) {
1966 for (j = 0; j < MAX_TPA; j++) {
1967 struct bnxt_tpa_info *tpa_info =
1968 &rxr->rx_tpa[j];
1969 u8 *data = tpa_info->data;
1970
1971 if (!data)
1972 continue;
1973
c519fe9a
SN
1974 dma_unmap_single_attrs(&pdev->dev,
1975 tpa_info->mapping,
1976 bp->rx_buf_use_size,
1977 bp->rx_dir,
1978 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
1979
1980 tpa_info->data = NULL;
1981
1982 kfree(data);
1983 }
1984 }
1985
1986 for (j = 0; j < max_idx; j++) {
1987 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
3ed3a83e 1988 dma_addr_t mapping = rx_buf->mapping;
6bb19474 1989 void *data = rx_buf->data;
c0c050c5
MC
1990
1991 if (!data)
1992 continue;
1993
c0c050c5
MC
1994 rx_buf->data = NULL;
1995
3ed3a83e
MC
1996 if (BNXT_RX_PAGE_MODE(bp)) {
1997 mapping -= bp->rx_dma_offset;
c519fe9a
SN
1998 dma_unmap_page_attrs(&pdev->dev, mapping,
1999 PAGE_SIZE, bp->rx_dir,
2000 DMA_ATTR_WEAK_ORDERING);
c61fb99c 2001 __free_page(data);
3ed3a83e 2002 } else {
c519fe9a
SN
2003 dma_unmap_single_attrs(&pdev->dev, mapping,
2004 bp->rx_buf_use_size,
2005 bp->rx_dir,
2006 DMA_ATTR_WEAK_ORDERING);
c61fb99c 2007 kfree(data);
3ed3a83e 2008 }
c0c050c5
MC
2009 }
2010
2011 for (j = 0; j < max_agg_idx; j++) {
2012 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2013 &rxr->rx_agg_ring[j];
2014 struct page *page = rx_agg_buf->page;
2015
2016 if (!page)
2017 continue;
2018
c519fe9a
SN
2019 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2020 BNXT_RX_PAGE_SIZE,
2021 PCI_DMA_FROMDEVICE,
2022 DMA_ATTR_WEAK_ORDERING);
c0c050c5
MC
2023
2024 rx_agg_buf->page = NULL;
2025 __clear_bit(j, rxr->rx_agg_bmap);
2026
2027 __free_page(page);
2028 }
89d0a06c
MC
2029 if (rxr->rx_page) {
2030 __free_page(rxr->rx_page);
2031 rxr->rx_page = NULL;
2032 }
c0c050c5
MC
2033 }
2034}
2035
2036static void bnxt_free_skbs(struct bnxt *bp)
2037{
2038 bnxt_free_tx_skbs(bp);
2039 bnxt_free_rx_skbs(bp);
2040}
2041
2042static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2043{
2044 struct pci_dev *pdev = bp->pdev;
2045 int i;
2046
2047 for (i = 0; i < ring->nr_pages; i++) {
2048 if (!ring->pg_arr[i])
2049 continue;
2050
2051 dma_free_coherent(&pdev->dev, ring->page_size,
2052 ring->pg_arr[i], ring->dma_arr[i]);
2053
2054 ring->pg_arr[i] = NULL;
2055 }
2056 if (ring->pg_tbl) {
2057 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
2058 ring->pg_tbl, ring->pg_tbl_map);
2059 ring->pg_tbl = NULL;
2060 }
2061 if (ring->vmem_size && *ring->vmem) {
2062 vfree(*ring->vmem);
2063 *ring->vmem = NULL;
2064 }
2065}
2066
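/* Allocate the DMA-coherent pages backing a hardware ring.  Rings that
 * span more than one page also get a page table holding the DMA address
 * of each page, and rings with software state get a vzalloc'ed vmem area.
 */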
2067static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2068{
2069 int i;
2070 struct pci_dev *pdev = bp->pdev;
2071
2072 if (ring->nr_pages > 1) {
2073 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
2074 ring->nr_pages * 8,
2075 &ring->pg_tbl_map,
2076 GFP_KERNEL);
2077 if (!ring->pg_tbl)
2078 return -ENOMEM;
2079 }
2080
2081 for (i = 0; i < ring->nr_pages; i++) {
2082 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2083 ring->page_size,
2084 &ring->dma_arr[i],
2085 GFP_KERNEL);
2086 if (!ring->pg_arr[i])
2087 return -ENOMEM;
2088
2089 if (ring->nr_pages > 1)
2090 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
2091 }
2092
2093 if (ring->vmem_size) {
2094 *ring->vmem = vzalloc(ring->vmem_size);
2095 if (!(*ring->vmem))
2096 return -ENOMEM;
2097 }
2098 return 0;
2099}
2100
2101static void bnxt_free_rx_rings(struct bnxt *bp)
2102{
2103 int i;
2104
b6ab4b01 2105 if (!bp->rx_ring)
c0c050c5
MC
2106 return;
2107
2108 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2109 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2110 struct bnxt_ring_struct *ring;
2111
c6d30e83
MC
2112 if (rxr->xdp_prog)
2113 bpf_prog_put(rxr->xdp_prog);
2114
c0c050c5
MC
2115 kfree(rxr->rx_tpa);
2116 rxr->rx_tpa = NULL;
2117
2118 kfree(rxr->rx_agg_bmap);
2119 rxr->rx_agg_bmap = NULL;
2120
2121 ring = &rxr->rx_ring_struct;
2122 bnxt_free_ring(bp, ring);
2123
2124 ring = &rxr->rx_agg_ring_struct;
2125 bnxt_free_ring(bp, ring);
2126 }
2127}
2128
2129static int bnxt_alloc_rx_rings(struct bnxt *bp)
2130{
2131 int i, rc, agg_rings = 0, tpa_rings = 0;
2132
b6ab4b01
MC
2133 if (!bp->rx_ring)
2134 return -ENOMEM;
2135
c0c050c5
MC
2136 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2137 agg_rings = 1;
2138
2139 if (bp->flags & BNXT_FLAG_TPA)
2140 tpa_rings = 1;
2141
2142 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 2143 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
2144 struct bnxt_ring_struct *ring;
2145
c0c050c5
MC
2146 ring = &rxr->rx_ring_struct;
2147
2148 rc = bnxt_alloc_ring(bp, ring);
2149 if (rc)
2150 return rc;
2151
2152 if (agg_rings) {
2153 u16 mem_size;
2154
2155 ring = &rxr->rx_agg_ring_struct;
2156 rc = bnxt_alloc_ring(bp, ring);
2157 if (rc)
2158 return rc;
2159
2160 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2161 mem_size = rxr->rx_agg_bmap_size / 8;
2162 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2163 if (!rxr->rx_agg_bmap)
2164 return -ENOMEM;
2165
2166 if (tpa_rings) {
2167 rxr->rx_tpa = kcalloc(MAX_TPA,
2168 sizeof(struct bnxt_tpa_info),
2169 GFP_KERNEL);
2170 if (!rxr->rx_tpa)
2171 return -ENOMEM;
2172 }
2173 }
2174 }
2175 return 0;
2176}
2177
2178static void bnxt_free_tx_rings(struct bnxt *bp)
2179{
2180 int i;
2181 struct pci_dev *pdev = bp->pdev;
2182
b6ab4b01 2183 if (!bp->tx_ring)
c0c050c5
MC
2184 return;
2185
2186 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2187 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2188 struct bnxt_ring_struct *ring;
2189
c0c050c5
MC
2190 if (txr->tx_push) {
2191 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2192 txr->tx_push, txr->tx_push_mapping);
2193 txr->tx_push = NULL;
2194 }
2195
2196 ring = &txr->tx_ring_struct;
2197
2198 bnxt_free_ring(bp, ring);
2199 }
2200}
2201
2202static int bnxt_alloc_tx_rings(struct bnxt *bp)
2203{
2204 int i, j, rc;
2205 struct pci_dev *pdev = bp->pdev;
2206
2207 bp->tx_push_size = 0;
2208 if (bp->tx_push_thresh) {
2209 int push_size;
2210
2211 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2212 bp->tx_push_thresh);
2213
4419dbe6 2214 if (push_size > 256) {
c0c050c5
MC
2215 push_size = 0;
2216 bp->tx_push_thresh = 0;
2217 }
2218
2219 bp->tx_push_size = push_size;
2220 }
2221
2222 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2223 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2224 struct bnxt_ring_struct *ring;
2225
c0c050c5
MC
2226 ring = &txr->tx_ring_struct;
2227
2228 rc = bnxt_alloc_ring(bp, ring);
2229 if (rc)
2230 return rc;
2231
2232 if (bp->tx_push_size) {
c0c050c5
MC
2233 dma_addr_t mapping;
2234
 2235 /* One pre-allocated DMA buffer to back up the
 2236 * TX push operation
 2237 */
2238 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2239 bp->tx_push_size,
2240 &txr->tx_push_mapping,
2241 GFP_KERNEL);
2242
2243 if (!txr->tx_push)
2244 return -ENOMEM;
2245
c0c050c5
MC
2246 mapping = txr->tx_push_mapping +
2247 sizeof(struct tx_push_bd);
4419dbe6 2248 txr->data_mapping = cpu_to_le64(mapping);
c0c050c5 2249
4419dbe6 2250 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
c0c050c5
MC
2251 }
2252 ring->queue_id = bp->q_info[j].queue_id;
5f449249
MC
2253 if (i < bp->tx_nr_rings_xdp)
2254 continue;
c0c050c5
MC
2255 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2256 j++;
2257 }
2258 return 0;
2259}
2260
2261static void bnxt_free_cp_rings(struct bnxt *bp)
2262{
2263 int i;
2264
2265 if (!bp->bnapi)
2266 return;
2267
2268 for (i = 0; i < bp->cp_nr_rings; i++) {
2269 struct bnxt_napi *bnapi = bp->bnapi[i];
2270 struct bnxt_cp_ring_info *cpr;
2271 struct bnxt_ring_struct *ring;
2272
2273 if (!bnapi)
2274 continue;
2275
2276 cpr = &bnapi->cp_ring;
2277 ring = &cpr->cp_ring_struct;
2278
2279 bnxt_free_ring(bp, ring);
2280 }
2281}
2282
2283static int bnxt_alloc_cp_rings(struct bnxt *bp)
2284{
2285 int i, rc;
2286
2287 for (i = 0; i < bp->cp_nr_rings; i++) {
2288 struct bnxt_napi *bnapi = bp->bnapi[i];
2289 struct bnxt_cp_ring_info *cpr;
2290 struct bnxt_ring_struct *ring;
2291
2292 if (!bnapi)
2293 continue;
2294
2295 cpr = &bnapi->cp_ring;
2296 ring = &cpr->cp_ring_struct;
2297
2298 rc = bnxt_alloc_ring(bp, ring);
2299 if (rc)
2300 return rc;
2301 }
2302 return 0;
2303}
2304
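/* Point each ring_struct at the completion, RX, RX-agg and TX descriptor
 * page arrays and software ring memory of its NAPI instance, so that
 * bnxt_alloc_ring() can allocate the backing pages for them.
 */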
2305static void bnxt_init_ring_struct(struct bnxt *bp)
2306{
2307 int i;
2308
2309 for (i = 0; i < bp->cp_nr_rings; i++) {
2310 struct bnxt_napi *bnapi = bp->bnapi[i];
2311 struct bnxt_cp_ring_info *cpr;
2312 struct bnxt_rx_ring_info *rxr;
2313 struct bnxt_tx_ring_info *txr;
2314 struct bnxt_ring_struct *ring;
2315
2316 if (!bnapi)
2317 continue;
2318
2319 cpr = &bnapi->cp_ring;
2320 ring = &cpr->cp_ring_struct;
2321 ring->nr_pages = bp->cp_nr_pages;
2322 ring->page_size = HW_CMPD_RING_SIZE;
2323 ring->pg_arr = (void **)cpr->cp_desc_ring;
2324 ring->dma_arr = cpr->cp_desc_mapping;
2325 ring->vmem_size = 0;
2326
b6ab4b01 2327 rxr = bnapi->rx_ring;
3b2b7d9d
MC
2328 if (!rxr)
2329 goto skip_rx;
2330
c0c050c5
MC
2331 ring = &rxr->rx_ring_struct;
2332 ring->nr_pages = bp->rx_nr_pages;
2333 ring->page_size = HW_RXBD_RING_SIZE;
2334 ring->pg_arr = (void **)rxr->rx_desc_ring;
2335 ring->dma_arr = rxr->rx_desc_mapping;
2336 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2337 ring->vmem = (void **)&rxr->rx_buf_ring;
2338
2339 ring = &rxr->rx_agg_ring_struct;
2340 ring->nr_pages = bp->rx_agg_nr_pages;
2341 ring->page_size = HW_RXBD_RING_SIZE;
2342 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
2343 ring->dma_arr = rxr->rx_agg_desc_mapping;
2344 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2345 ring->vmem = (void **)&rxr->rx_agg_ring;
2346
3b2b7d9d 2347skip_rx:
b6ab4b01 2348 txr = bnapi->tx_ring;
3b2b7d9d
MC
2349 if (!txr)
2350 continue;
2351
c0c050c5
MC
2352 ring = &txr->tx_ring_struct;
2353 ring->nr_pages = bp->tx_nr_pages;
2354 ring->page_size = HW_RXBD_RING_SIZE;
2355 ring->pg_arr = (void **)txr->tx_desc_ring;
2356 ring->dma_arr = txr->tx_desc_mapping;
2357 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2358 ring->vmem = (void **)&txr->tx_buf_ring;
2359 }
2360}
2361
2362static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2363{
2364 int i;
2365 u32 prod;
2366 struct rx_bd **rx_buf_ring;
2367
2368 rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2369 for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2370 int j;
2371 struct rx_bd *rxbd;
2372
2373 rxbd = rx_buf_ring[i];
2374 if (!rxbd)
2375 continue;
2376
2377 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2378 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2379 rxbd->rx_bd_opaque = prod;
2380 }
2381 }
2382}
2383
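/* Initialize one RX ring: set up the buffer descriptors, take a reference
 * on the XDP program in page mode, fill the ring (and the aggregation
 * ring, if enabled) with buffers, and pre-allocate the TPA buffers when
 * LRO/GRO aggregation is in use.
 */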
2384static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2385{
2386 struct net_device *dev = bp->dev;
c0c050c5
MC
2387 struct bnxt_rx_ring_info *rxr;
2388 struct bnxt_ring_struct *ring;
2389 u32 prod, type;
2390 int i;
2391
c0c050c5
MC
2392 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2393 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2394
2395 if (NET_IP_ALIGN == 2)
2396 type |= RX_BD_FLAGS_SOP;
2397
b6ab4b01 2398 rxr = &bp->rx_ring[ring_nr];
c0c050c5
MC
2399 ring = &rxr->rx_ring_struct;
2400 bnxt_init_rxbd_pages(ring, type);
2401
c6d30e83
MC
2402 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2403 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2404 if (IS_ERR(rxr->xdp_prog)) {
2405 int rc = PTR_ERR(rxr->xdp_prog);
2406
2407 rxr->xdp_prog = NULL;
2408 return rc;
2409 }
2410 }
c0c050c5
MC
2411 prod = rxr->rx_prod;
2412 for (i = 0; i < bp->rx_ring_size; i++) {
2413 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2414 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2415 ring_nr, i, bp->rx_ring_size);
2416 break;
2417 }
2418 prod = NEXT_RX(prod);
2419 }
2420 rxr->rx_prod = prod;
2421 ring->fw_ring_id = INVALID_HW_RING_ID;
2422
edd0c2cc
MC
2423 ring = &rxr->rx_agg_ring_struct;
2424 ring->fw_ring_id = INVALID_HW_RING_ID;
2425
c0c050c5
MC
2426 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2427 return 0;
2428
2839f28b 2429 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
c0c050c5
MC
2430 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2431
2432 bnxt_init_rxbd_pages(ring, type);
2433
2434 prod = rxr->rx_agg_prod;
2435 for (i = 0; i < bp->rx_agg_ring_size; i++) {
2436 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2437 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
 2438 ring_nr, i, bp->rx_agg_ring_size);
2439 break;
2440 }
2441 prod = NEXT_RX_AGG(prod);
2442 }
2443 rxr->rx_agg_prod = prod;
c0c050c5
MC
2444
2445 if (bp->flags & BNXT_FLAG_TPA) {
2446 if (rxr->rx_tpa) {
2447 u8 *data;
2448 dma_addr_t mapping;
2449
2450 for (i = 0; i < MAX_TPA; i++) {
2451 data = __bnxt_alloc_rx_data(bp, &mapping,
2452 GFP_KERNEL);
2453 if (!data)
2454 return -ENOMEM;
2455
2456 rxr->rx_tpa[i].data = data;
b3dba77c 2457 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
c0c050c5
MC
2458 rxr->rx_tpa[i].mapping = mapping;
2459 }
2460 } else {
2461 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2462 return -ENOMEM;
2463 }
2464 }
2465
2466 return 0;
2467}
2468
2247925f
SP
2469static void bnxt_init_cp_rings(struct bnxt *bp)
2470{
2471 int i;
2472
2473 for (i = 0; i < bp->cp_nr_rings; i++) {
2474 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2475 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2476
2477 ring->fw_ring_id = INVALID_HW_RING_ID;
2478 }
2479}
2480
c0c050c5
MC
2481static int bnxt_init_rx_rings(struct bnxt *bp)
2482{
2483 int i, rc = 0;
2484
c61fb99c 2485 if (BNXT_RX_PAGE_MODE(bp)) {
c6d30e83
MC
2486 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2487 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
c61fb99c
MC
2488 } else {
2489 bp->rx_offset = BNXT_RX_OFFSET;
2490 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2491 }
b3dba77c 2492
c0c050c5
MC
2493 for (i = 0; i < bp->rx_nr_rings; i++) {
2494 rc = bnxt_init_one_rx_ring(bp, i);
2495 if (rc)
2496 break;
2497 }
2498
2499 return rc;
2500}
2501
2502static int bnxt_init_tx_rings(struct bnxt *bp)
2503{
2504 u16 i;
2505
2506 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2507 MAX_SKB_FRAGS + 1);
2508
2509 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 2510 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
c0c050c5
MC
2511 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2512
2513 ring->fw_ring_id = INVALID_HW_RING_ID;
2514 }
2515
2516 return 0;
2517}
2518
2519static void bnxt_free_ring_grps(struct bnxt *bp)
2520{
2521 kfree(bp->grp_info);
2522 bp->grp_info = NULL;
2523}
2524
2525static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2526{
2527 int i;
2528
2529 if (irq_re_init) {
2530 bp->grp_info = kcalloc(bp->cp_nr_rings,
2531 sizeof(struct bnxt_ring_grp_info),
2532 GFP_KERNEL);
2533 if (!bp->grp_info)
2534 return -ENOMEM;
2535 }
2536 for (i = 0; i < bp->cp_nr_rings; i++) {
2537 if (irq_re_init)
2538 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2539 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2540 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2541 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2542 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2543 }
2544 return 0;
2545}
2546
2547static void bnxt_free_vnics(struct bnxt *bp)
2548{
2549 kfree(bp->vnic_info);
2550 bp->vnic_info = NULL;
2551 bp->nr_vnics = 0;
2552}
2553
2554static int bnxt_alloc_vnics(struct bnxt *bp)
2555{
2556 int num_vnics = 1;
2557
2558#ifdef CONFIG_RFS_ACCEL
2559 if (bp->flags & BNXT_FLAG_RFS)
2560 num_vnics += bp->rx_nr_rings;
2561#endif
2562
dc52c6c7
PS
2563 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2564 num_vnics++;
2565
c0c050c5
MC
2566 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2567 GFP_KERNEL);
2568 if (!bp->vnic_info)
2569 return -ENOMEM;
2570
2571 bp->nr_vnics = num_vnics;
2572 return 0;
2573}
2574
2575static void bnxt_init_vnics(struct bnxt *bp)
2576{
2577 int i;
2578
2579 for (i = 0; i < bp->nr_vnics; i++) {
2580 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2581
2582 vnic->fw_vnic_id = INVALID_HW_RING_ID;
94ce9caa
PS
2583 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
2584 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
c0c050c5
MC
2585 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2586
2587 if (bp->vnic_info[i].rss_hash_key) {
2588 if (i == 0)
2589 prandom_bytes(vnic->rss_hash_key,
2590 HW_HASH_KEY_SIZE);
2591 else
2592 memcpy(vnic->rss_hash_key,
2593 bp->vnic_info[0].rss_hash_key,
2594 HW_HASH_KEY_SIZE);
2595 }
2596 }
2597}
2598
2599static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2600{
2601 int pages;
2602
2603 pages = ring_size / desc_per_pg;
2604
2605 if (!pages)
2606 return 1;
2607
2608 pages++;
2609
2610 while (pages & (pages - 1))
2611 pages++;
2612
2613 return pages;
2614}
2615
c6d30e83 2616void bnxt_set_tpa_flags(struct bnxt *bp)
c0c050c5
MC
2617{
2618 bp->flags &= ~BNXT_FLAG_TPA;
341138c3
MC
2619 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
2620 return;
c0c050c5
MC
2621 if (bp->dev->features & NETIF_F_LRO)
2622 bp->flags |= BNXT_FLAG_LRO;
94758f8d 2623 if (bp->dev->features & NETIF_F_GRO)
c0c050c5
MC
2624 bp->flags |= BNXT_FLAG_GRO;
2625}
2626
2627/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2628 * be set on entry.
2629 */
2630void bnxt_set_ring_params(struct bnxt *bp)
2631{
2632 u32 ring_size, rx_size, rx_space;
2633 u32 agg_factor = 0, agg_ring_size = 0;
2634
2635 /* 8 for CRC and VLAN */
2636 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2637
2638 rx_space = rx_size + NET_SKB_PAD +
2639 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2640
2641 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2642 ring_size = bp->rx_ring_size;
2643 bp->rx_agg_ring_size = 0;
2644 bp->rx_agg_nr_pages = 0;
2645
2646 if (bp->flags & BNXT_FLAG_TPA)
2839f28b 2647 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
c0c050c5
MC
2648
2649 bp->flags &= ~BNXT_FLAG_JUMBO;
bdbd1eb5 2650 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
c0c050c5
MC
2651 u32 jumbo_factor;
2652
2653 bp->flags |= BNXT_FLAG_JUMBO;
2654 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2655 if (jumbo_factor > agg_factor)
2656 agg_factor = jumbo_factor;
2657 }
2658 agg_ring_size = ring_size * agg_factor;
2659
2660 if (agg_ring_size) {
2661 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2662 RX_DESC_CNT);
2663 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2664 u32 tmp = agg_ring_size;
2665
2666 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2667 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2668 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2669 tmp, agg_ring_size);
2670 }
2671 bp->rx_agg_ring_size = agg_ring_size;
2672 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2673 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2674 rx_space = rx_size + NET_SKB_PAD +
2675 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2676 }
2677
2678 bp->rx_buf_use_size = rx_size;
2679 bp->rx_buf_size = rx_space;
2680
2681 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2682 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2683
2684 ring_size = bp->tx_ring_size;
2685 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2686 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2687
2688 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2689 bp->cp_ring_size = ring_size;
2690
2691 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2692 if (bp->cp_nr_pages > MAX_CP_PAGES) {
2693 bp->cp_nr_pages = MAX_CP_PAGES;
2694 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2695 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2696 ring_size, bp->cp_ring_size);
2697 }
2698 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2699 bp->cp_ring_mask = bp->cp_bit - 1;
2700}
2701
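/* Switch the RX path between normal buffer mode and page mode (used for
 * XDP).  Page mode caps the MTU, disables LRO and aggregation rings, and
 * switches the RX DMA direction to bidirectional; normal mode restores
 * the default SKB receive function.
 */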
c61fb99c 2702int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
6bb19474 2703{
c61fb99c
MC
2704 if (page_mode) {
2705 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
2706 return -EOPNOTSUPP;
2707 bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
2708 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
2709 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
2710 bp->dev->hw_features &= ~NETIF_F_LRO;
2711 bp->dev->features &= ~NETIF_F_LRO;
2712 bp->rx_dir = DMA_BIDIRECTIONAL;
2713 bp->rx_skb_func = bnxt_rx_page_skb;
2714 } else {
2715 bp->dev->max_mtu = BNXT_MAX_MTU;
2716 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
2717 bp->rx_dir = DMA_FROM_DEVICE;
2718 bp->rx_skb_func = bnxt_rx_skb;
2719 }
6bb19474
MC
2720 return 0;
2721}
2722
c0c050c5
MC
2723static void bnxt_free_vnic_attributes(struct bnxt *bp)
2724{
2725 int i;
2726 struct bnxt_vnic_info *vnic;
2727 struct pci_dev *pdev = bp->pdev;
2728
2729 if (!bp->vnic_info)
2730 return;
2731
2732 for (i = 0; i < bp->nr_vnics; i++) {
2733 vnic = &bp->vnic_info[i];
2734
2735 kfree(vnic->fw_grp_ids);
2736 vnic->fw_grp_ids = NULL;
2737
2738 kfree(vnic->uc_list);
2739 vnic->uc_list = NULL;
2740
2741 if (vnic->mc_list) {
2742 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2743 vnic->mc_list, vnic->mc_list_mapping);
2744 vnic->mc_list = NULL;
2745 }
2746
2747 if (vnic->rss_table) {
2748 dma_free_coherent(&pdev->dev, PAGE_SIZE,
2749 vnic->rss_table,
2750 vnic->rss_table_dma_addr);
2751 vnic->rss_table = NULL;
2752 }
2753
2754 vnic->rss_hash_key = NULL;
2755 vnic->flags = 0;
2756 }
2757}
2758
2759static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2760{
2761 int i, rc = 0, size;
2762 struct bnxt_vnic_info *vnic;
2763 struct pci_dev *pdev = bp->pdev;
2764 int max_rings;
2765
2766 for (i = 0; i < bp->nr_vnics; i++) {
2767 vnic = &bp->vnic_info[i];
2768
2769 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2770 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2771
2772 if (mem_size > 0) {
2773 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2774 if (!vnic->uc_list) {
2775 rc = -ENOMEM;
2776 goto out;
2777 }
2778 }
2779 }
2780
2781 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2782 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2783 vnic->mc_list =
2784 dma_alloc_coherent(&pdev->dev,
2785 vnic->mc_list_size,
2786 &vnic->mc_list_mapping,
2787 GFP_KERNEL);
2788 if (!vnic->mc_list) {
2789 rc = -ENOMEM;
2790 goto out;
2791 }
2792 }
2793
2794 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2795 max_rings = bp->rx_nr_rings;
2796 else
2797 max_rings = 1;
2798
2799 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2800 if (!vnic->fw_grp_ids) {
2801 rc = -ENOMEM;
2802 goto out;
2803 }
2804
ae10ae74
MC
2805 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
2806 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
2807 continue;
2808
c0c050c5
MC
2809 /* Allocate rss table and hash key */
2810 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2811 &vnic->rss_table_dma_addr,
2812 GFP_KERNEL);
2813 if (!vnic->rss_table) {
2814 rc = -ENOMEM;
2815 goto out;
2816 }
2817
2818 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2819
2820 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2821 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2822 }
2823 return 0;
2824
2825out:
2826 return rc;
2827}
2828
2829static void bnxt_free_hwrm_resources(struct bnxt *bp)
2830{
2831 struct pci_dev *pdev = bp->pdev;
2832
2833 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2834 bp->hwrm_cmd_resp_dma_addr);
2835
2836 bp->hwrm_cmd_resp_addr = NULL;
2837 if (bp->hwrm_dbg_resp_addr) {
2838 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2839 bp->hwrm_dbg_resp_addr,
2840 bp->hwrm_dbg_resp_dma_addr);
2841
2842 bp->hwrm_dbg_resp_addr = NULL;
2843 }
2844}
2845
2846static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2847{
2848 struct pci_dev *pdev = bp->pdev;
2849
2850 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2851 &bp->hwrm_cmd_resp_dma_addr,
2852 GFP_KERNEL);
2853 if (!bp->hwrm_cmd_resp_addr)
2854 return -ENOMEM;
2855 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2856 HWRM_DBG_REG_BUF_SIZE,
2857 &bp->hwrm_dbg_resp_dma_addr,
2858 GFP_KERNEL);
2859 if (!bp->hwrm_dbg_resp_addr)
2860 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
2861
2862 return 0;
2863}
2864
e605db80
DK
2865static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
2866{
2867 if (bp->hwrm_short_cmd_req_addr) {
2868 struct pci_dev *pdev = bp->pdev;
2869
2870 dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
2871 bp->hwrm_short_cmd_req_addr,
2872 bp->hwrm_short_cmd_req_dma_addr);
2873 bp->hwrm_short_cmd_req_addr = NULL;
2874 }
2875}
2876
2877static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
2878{
2879 struct pci_dev *pdev = bp->pdev;
2880
2881 bp->hwrm_short_cmd_req_addr =
2882 dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
2883 &bp->hwrm_short_cmd_req_dma_addr,
2884 GFP_KERNEL);
2885 if (!bp->hwrm_short_cmd_req_addr)
2886 return -ENOMEM;
2887
2888 return 0;
2889}
2890
c0c050c5
MC
2891static void bnxt_free_stats(struct bnxt *bp)
2892{
2893 u32 size, i;
2894 struct pci_dev *pdev = bp->pdev;
2895
3bdf56c4
MC
2896 if (bp->hw_rx_port_stats) {
2897 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
2898 bp->hw_rx_port_stats,
2899 bp->hw_rx_port_stats_map);
2900 bp->hw_rx_port_stats = NULL;
2901 bp->flags &= ~BNXT_FLAG_PORT_STATS;
2902 }
2903
c0c050c5
MC
2904 if (!bp->bnapi)
2905 return;
2906
2907 size = sizeof(struct ctx_hw_stats);
2908
2909 for (i = 0; i < bp->cp_nr_rings; i++) {
2910 struct bnxt_napi *bnapi = bp->bnapi[i];
2911 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2912
2913 if (cpr->hw_stats) {
2914 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2915 cpr->hw_stats_map);
2916 cpr->hw_stats = NULL;
2917 }
2918 }
2919}
2920
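/* Allocate the DMA-coherent statistics buffers: one hardware stats context
 * per completion ring and, on the PF (excluding the BCM58700), a shared
 * buffer for the RX and TX port statistics.
 */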
2921static int bnxt_alloc_stats(struct bnxt *bp)
2922{
2923 u32 size, i;
2924 struct pci_dev *pdev = bp->pdev;
2925
2926 size = sizeof(struct ctx_hw_stats);
2927
2928 for (i = 0; i < bp->cp_nr_rings; i++) {
2929 struct bnxt_napi *bnapi = bp->bnapi[i];
2930 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2931
2932 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2933 &cpr->hw_stats_map,
2934 GFP_KERNEL);
2935 if (!cpr->hw_stats)
2936 return -ENOMEM;
2937
2938 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2939 }
3bdf56c4 2940
3e8060fa 2941 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
3bdf56c4
MC
2942 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
2943 sizeof(struct tx_port_stats) + 1024;
2944
2945 bp->hw_rx_port_stats =
2946 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
2947 &bp->hw_rx_port_stats_map,
2948 GFP_KERNEL);
2949 if (!bp->hw_rx_port_stats)
2950 return -ENOMEM;
2951
2952 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
2953 512;
2954 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
2955 sizeof(struct rx_port_stats) + 512;
2956 bp->flags |= BNXT_FLAG_PORT_STATS;
2957 }
c0c050c5
MC
2958 return 0;
2959}
2960
2961static void bnxt_clear_ring_indices(struct bnxt *bp)
2962{
2963 int i;
2964
2965 if (!bp->bnapi)
2966 return;
2967
2968 for (i = 0; i < bp->cp_nr_rings; i++) {
2969 struct bnxt_napi *bnapi = bp->bnapi[i];
2970 struct bnxt_cp_ring_info *cpr;
2971 struct bnxt_rx_ring_info *rxr;
2972 struct bnxt_tx_ring_info *txr;
2973
2974 if (!bnapi)
2975 continue;
2976
2977 cpr = &bnapi->cp_ring;
2978 cpr->cp_raw_cons = 0;
2979
b6ab4b01 2980 txr = bnapi->tx_ring;
3b2b7d9d
MC
2981 if (txr) {
2982 txr->tx_prod = 0;
2983 txr->tx_cons = 0;
2984 }
c0c050c5 2985
b6ab4b01 2986 rxr = bnapi->rx_ring;
3b2b7d9d
MC
2987 if (rxr) {
2988 rxr->rx_prod = 0;
2989 rxr->rx_agg_prod = 0;
2990 rxr->rx_sw_agg_prod = 0;
376a5b86 2991 rxr->rx_next_cons = 0;
3b2b7d9d 2992 }
c0c050c5
MC
2993 }
2994}
2995
2996static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
2997{
2998#ifdef CONFIG_RFS_ACCEL
2999 int i;
3000
 3001 /* We are under rtnl_lock and all our NAPIs have been disabled,
 3002 * so it is safe to delete the hash table.
 3003 */
3004 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3005 struct hlist_head *head;
3006 struct hlist_node *tmp;
3007 struct bnxt_ntuple_filter *fltr;
3008
3009 head = &bp->ntp_fltr_hash_tbl[i];
3010 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3011 hlist_del(&fltr->hash);
3012 kfree(fltr);
3013 }
3014 }
3015 if (irq_reinit) {
3016 kfree(bp->ntp_fltr_bmap);
3017 bp->ntp_fltr_bmap = NULL;
3018 }
3019 bp->ntp_fltr_count = 0;
3020#endif
3021}
3022
3023static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3024{
3025#ifdef CONFIG_RFS_ACCEL
3026 int i, rc = 0;
3027
3028 if (!(bp->flags & BNXT_FLAG_RFS))
3029 return 0;
3030
3031 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3032 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3033
3034 bp->ntp_fltr_count = 0;
ac45bd93
DC
3035 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3036 sizeof(long),
c0c050c5
MC
3037 GFP_KERNEL);
3038
3039 if (!bp->ntp_fltr_bmap)
3040 rc = -ENOMEM;
3041
3042 return rc;
3043#else
3044 return 0;
3045#endif
3046}
3047
3048static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3049{
3050 bnxt_free_vnic_attributes(bp);
3051 bnxt_free_tx_rings(bp);
3052 bnxt_free_rx_rings(bp);
3053 bnxt_free_cp_rings(bp);
3054 bnxt_free_ntp_fltrs(bp, irq_re_init);
3055 if (irq_re_init) {
3056 bnxt_free_stats(bp);
3057 bnxt_free_ring_grps(bp);
3058 bnxt_free_vnics(bp);
a960dec9
MC
3059 kfree(bp->tx_ring_map);
3060 bp->tx_ring_map = NULL;
b6ab4b01
MC
3061 kfree(bp->tx_ring);
3062 bp->tx_ring = NULL;
3063 kfree(bp->rx_ring);
3064 bp->rx_ring = NULL;
c0c050c5
MC
3065 kfree(bp->bnapi);
3066 bp->bnapi = NULL;
3067 } else {
3068 bnxt_clear_ring_indices(bp);
3069 }
3070}
3071
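/* Allocate all driver memory for the current ring configuration.  With
 * irq_re_init set, the per-ring bnxt_napi structs, RX/TX ring arrays,
 * stats contexts, ntuple filter bitmap and VNIC structs are (re)allocated
 * as well; otherwise only the rings themselves are set up again.
 */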
3072static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3073{
01657bcd 3074 int i, j, rc, size, arr_size;
c0c050c5
MC
3075 void *bnapi;
3076
3077 if (irq_re_init) {
3078 /* Allocate bnapi mem pointer array and mem block for
3079 * all queues
3080 */
3081 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3082 bp->cp_nr_rings);
3083 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3084 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3085 if (!bnapi)
3086 return -ENOMEM;
3087
3088 bp->bnapi = bnapi;
3089 bnapi += arr_size;
3090 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3091 bp->bnapi[i] = bnapi;
3092 bp->bnapi[i]->index = i;
3093 bp->bnapi[i]->bp = bp;
3094 }
3095
b6ab4b01
MC
3096 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3097 sizeof(struct bnxt_rx_ring_info),
3098 GFP_KERNEL);
3099 if (!bp->rx_ring)
3100 return -ENOMEM;
3101
3102 for (i = 0; i < bp->rx_nr_rings; i++) {
3103 bp->rx_ring[i].bnapi = bp->bnapi[i];
3104 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3105 }
3106
3107 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3108 sizeof(struct bnxt_tx_ring_info),
3109 GFP_KERNEL);
3110 if (!bp->tx_ring)
3111 return -ENOMEM;
3112
a960dec9
MC
3113 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3114 GFP_KERNEL);
3115
3116 if (!bp->tx_ring_map)
3117 return -ENOMEM;
3118
01657bcd
MC
3119 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3120 j = 0;
3121 else
3122 j = bp->rx_nr_rings;
3123
3124 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3125 bp->tx_ring[i].bnapi = bp->bnapi[j];
3126 bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
5f449249 3127 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
38413406 3128 if (i >= bp->tx_nr_rings_xdp) {
5f449249
MC
3129 bp->tx_ring[i].txq_index = i -
3130 bp->tx_nr_rings_xdp;
38413406
MC
3131 bp->bnapi[j]->tx_int = bnxt_tx_int;
3132 } else {
fa3e93e8 3133 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
38413406
MC
3134 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3135 }
b6ab4b01
MC
3136 }
3137
c0c050c5
MC
3138 rc = bnxt_alloc_stats(bp);
3139 if (rc)
3140 goto alloc_mem_err;
3141
3142 rc = bnxt_alloc_ntp_fltrs(bp);
3143 if (rc)
3144 goto alloc_mem_err;
3145
3146 rc = bnxt_alloc_vnics(bp);
3147 if (rc)
3148 goto alloc_mem_err;
3149 }
3150
3151 bnxt_init_ring_struct(bp);
3152
3153 rc = bnxt_alloc_rx_rings(bp);
3154 if (rc)
3155 goto alloc_mem_err;
3156
3157 rc = bnxt_alloc_tx_rings(bp);
3158 if (rc)
3159 goto alloc_mem_err;
3160
3161 rc = bnxt_alloc_cp_rings(bp);
3162 if (rc)
3163 goto alloc_mem_err;
3164
3165 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3166 BNXT_VNIC_UCAST_FLAG;
3167 rc = bnxt_alloc_vnic_attributes(bp);
3168 if (rc)
3169 goto alloc_mem_err;
3170 return 0;
3171
3172alloc_mem_err:
3173 bnxt_free_mem(bp, true);
3174 return rc;
3175}
3176
9d8bc097
MC
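/* Interrupt control: bnxt_disable_int() acks the completion rings without
 * re-arming them, bnxt_disable_int_sync() additionally raises intr_sem and
 * waits for in-flight IRQ handlers, and bnxt_enable_int() re-arms all
 * completion ring interrupts.
 */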
3177static void bnxt_disable_int(struct bnxt *bp)
3178{
3179 int i;
3180
3181 if (!bp->bnapi)
3182 return;
3183
3184 for (i = 0; i < bp->cp_nr_rings; i++) {
3185 struct bnxt_napi *bnapi = bp->bnapi[i];
3186 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
daf1f1e7 3187 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
9d8bc097 3188
daf1f1e7
MC
3189 if (ring->fw_ring_id != INVALID_HW_RING_ID)
3190 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
9d8bc097
MC
3191 }
3192}
3193
3194static void bnxt_disable_int_sync(struct bnxt *bp)
3195{
3196 int i;
3197
3198 atomic_inc(&bp->intr_sem);
3199
3200 bnxt_disable_int(bp);
3201 for (i = 0; i < bp->cp_nr_rings; i++)
3202 synchronize_irq(bp->irq_tbl[i].vector);
3203}
3204
3205static void bnxt_enable_int(struct bnxt *bp)
3206{
3207 int i;
3208
3209 atomic_set(&bp->intr_sem, 0);
3210 for (i = 0; i < bp->cp_nr_rings; i++) {
3211 struct bnxt_napi *bnapi = bp->bnapi[i];
3212 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3213
3214 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
3215 }
3216}
3217
c0c050c5
MC
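/* Fill in the common HWRM request header: request type, completion ring,
 * target function and the DMA address of the response buffer.
 */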
3218void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3219 u16 cmpl_ring, u16 target_id)
3220{
a8643e16 3221 struct input *req = request;
c0c050c5 3222
a8643e16
MC
3223 req->req_type = cpu_to_le16(req_type);
3224 req->cmpl_ring = cpu_to_le16(cmpl_ring);
3225 req->target_id = cpu_to_le16(target_id);
c0c050c5
MC
3226 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3227}
3228
fbfbc485
MC
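/* Send one HWRM request to the firmware.  The request (or a short-command
 * wrapper pointing to it) is written into BAR0 and the channel doorbell is
 * rung; completion is then detected either via the HWRM completion
 * interrupt or by polling the response length and valid bit, bounded by
 * the timeout.  Only one request may be outstanding at a time.
 */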
3229static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3230 int timeout, bool silent)
c0c050c5 3231{
a11fa2be 3232 int i, intr_process, rc, tmo_count;
a8643e16 3233 struct input *req = msg;
c0c050c5
MC
3234 u32 *data = msg;
3235 __le32 *resp_len, *valid;
3236 u16 cp_ring_id, len = 0;
3237 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
e605db80 3238 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
c0c050c5 3239
a8643e16 3240 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
c0c050c5 3241 memset(resp, 0, PAGE_SIZE);
a8643e16 3242 cp_ring_id = le16_to_cpu(req->cmpl_ring);
c0c050c5
MC
3243 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3244
e605db80
DK
3245 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
3246 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
3247 struct hwrm_short_input short_input = {0};
3248
3249 memcpy(short_cmd_req, req, msg_len);
3250 memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
3251 msg_len);
3252
3253 short_input.req_type = req->req_type;
3254 short_input.signature =
3255 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3256 short_input.size = cpu_to_le16(msg_len);
3257 short_input.req_addr =
3258 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3259
3260 data = (u32 *)&short_input;
3261 msg_len = sizeof(short_input);
3262
3263 /* Sync memory write before updating doorbell */
3264 wmb();
3265
3266 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3267 }
3268
c0c050c5
MC
3269 /* Write request msg to hwrm channel */
3270 __iowrite32_copy(bp->bar0, data, msg_len / 4);
3271
e605db80 3272 for (i = msg_len; i < max_req_len; i += 4)
d79979a1
MC
3273 writel(0, bp->bar0 + i);
3274
c0c050c5
MC
3275 /* currently supports only one outstanding message */
3276 if (intr_process)
a8643e16 3277 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
c0c050c5
MC
3278
3279 /* Ring channel doorbell */
3280 writel(1, bp->bar0 + 0x100);
3281
ff4fe81d
MC
3282 if (!timeout)
3283 timeout = DFLT_HWRM_CMD_TIMEOUT;
3284
c0c050c5 3285 i = 0;
a11fa2be 3286 tmo_count = timeout * 40;
c0c050c5
MC
3287 if (intr_process) {
3288 /* Wait until hwrm response cmpl interrupt is processed */
3289 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
a11fa2be
MC
3290 i++ < tmo_count) {
3291 usleep_range(25, 40);
c0c050c5
MC
3292 }
3293
3294 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
3295 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
a8643e16 3296 le16_to_cpu(req->req_type));
c0c050c5
MC
3297 return -1;
3298 }
3299 } else {
3300 /* Check if response len is updated */
3301 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
a11fa2be 3302 for (i = 0; i < tmo_count; i++) {
c0c050c5
MC
3303 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3304 HWRM_RESP_LEN_SFT;
3305 if (len)
3306 break;
a11fa2be 3307 usleep_range(25, 40);
c0c050c5
MC
3308 }
3309
a11fa2be 3310 if (i >= tmo_count) {
c0c050c5 3311 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
a8643e16 3312 timeout, le16_to_cpu(req->req_type),
8578d6c1 3313 le16_to_cpu(req->seq_id), len);
c0c050c5
MC
3314 return -1;
3315 }
3316
3317 /* Last word of resp contains valid bit */
3318 valid = bp->hwrm_cmd_resp_addr + len - 4;
a11fa2be 3319 for (i = 0; i < 5; i++) {
c0c050c5
MC
3320 if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
3321 break;
a11fa2be 3322 udelay(1);
c0c050c5
MC
3323 }
3324
a11fa2be 3325 if (i >= 5) {
c0c050c5 3326 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
a8643e16
MC
3327 timeout, le16_to_cpu(req->req_type),
3328 le16_to_cpu(req->seq_id), len, *valid);
c0c050c5
MC
3329 return -1;
3330 }
3331 }
3332
3333 rc = le16_to_cpu(resp->error_code);
fbfbc485 3334 if (rc && !silent)
c0c050c5
MC
3335 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3336 le16_to_cpu(resp->req_type),
3337 le16_to_cpu(resp->seq_id), rc);
fbfbc485
MC
3338 return rc;
3339}
3340
3341int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3342{
3343 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
c0c050c5
MC
3344}
3345
3346int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3347{
3348 int rc;
3349
3350 mutex_lock(&bp->hwrm_cmd_lock);
3351 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3352 mutex_unlock(&bp->hwrm_cmd_lock);
3353 return rc;
3354}
3355
90e20921
MC
3356int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3357 int timeout)
3358{
3359 int rc;
3360
3361 mutex_lock(&bp->hwrm_cmd_lock);
3362 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3363 mutex_unlock(&bp->hwrm_cmd_lock);
3364 return rc;
3365}
3366
a1653b13
MC
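/* Tell the firmware which asynchronous events should be forwarded to this
 * function: the driver's default set plus any extra bits in the caller's
 * bitmap.
 */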
3367int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3368 int bmap_size)
c0c050c5
MC
3369{
3370 struct hwrm_func_drv_rgtr_input req = {0};
25be8623
MC
3371 DECLARE_BITMAP(async_events_bmap, 256);
3372 u32 *events = (u32 *)async_events_bmap;
a1653b13 3373 int i;
c0c050c5
MC
3374
3375 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3376
3377 req.enables =
a1653b13 3378 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
c0c050c5 3379
25be8623
MC
3380 memset(async_events_bmap, 0, sizeof(async_events_bmap));
3381 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
3382 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
3383
a1653b13
MC
3384 if (bmap && bmap_size) {
3385 for (i = 0; i < bmap_size; i++) {
3386 if (test_bit(i, bmap))
3387 __set_bit(i, async_events_bmap);
3388 }
3389 }
3390
25be8623
MC
3391 for (i = 0; i < 8; i++)
3392 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
3393
a1653b13
MC
3394 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3395}
3396
3397static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
3398{
3399 struct hwrm_func_drv_rgtr_input req = {0};
3400
3401 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3402
3403 req.enables =
3404 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
3405 FUNC_DRV_RGTR_REQ_ENABLES_VER);
3406
11f15ed3 3407 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
c0c050c5
MC
3408 req.ver_maj = DRV_VER_MAJ;
3409 req.ver_min = DRV_VER_MIN;
3410 req.ver_upd = DRV_VER_UPD;
3411
3412 if (BNXT_PF(bp)) {
de68f5de 3413 DECLARE_BITMAP(vf_req_snif_bmap, 256);
c0c050c5 3414 u32 *data = (u32 *)vf_req_snif_bmap;
a1653b13 3415 int i;
c0c050c5 3416
de68f5de 3417 memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
c0c050c5
MC
3418 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
3419 __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
3420
de68f5de
MC
3421 for (i = 0; i < 8; i++)
3422 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
3423
c0c050c5
MC
3424 req.enables |=
3425 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
3426 }
3427
3428 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3429}
3430
be58a0da
JH
3431static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
3432{
3433 struct hwrm_func_drv_unrgtr_input req = {0};
3434
3435 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
3436 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3437}
3438
c0c050c5
MC
3439static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
3440{
3441 u32 rc = 0;
3442 struct hwrm_tunnel_dst_port_free_input req = {0};
3443
3444 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
3445 req.tunnel_type = tunnel_type;
3446
3447 switch (tunnel_type) {
3448 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
3449 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
3450 break;
3451 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
3452 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
3453 break;
3454 default:
3455 break;
3456 }
3457
3458 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3459 if (rc)
3460 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
3461 rc);
3462 return rc;
3463}
3464
3465static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
3466 u8 tunnel_type)
3467{
3468 u32 rc = 0;
3469 struct hwrm_tunnel_dst_port_alloc_input req = {0};
3470 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3471
3472 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
3473
3474 req.tunnel_type = tunnel_type;
3475 req.tunnel_dst_port_val = port;
3476
3477 mutex_lock(&bp->hwrm_cmd_lock);
3478 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3479 if (rc) {
3480 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
3481 rc);
3482 goto err_out;
3483 }
3484
57aac71b
CJ
3485 switch (tunnel_type) {
3486 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
c0c050c5 3487 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
57aac71b
CJ
3488 break;
3489 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
c0c050c5 3490 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
57aac71b
CJ
3491 break;
3492 default:
3493 break;
3494 }
3495
c0c050c5
MC
3496err_out:
3497 mutex_unlock(&bp->hwrm_cmd_lock);
3498 return rc;
3499}
3500
3501static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
3502{
3503 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3504 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3505
3506 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
c193554e 3507 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
c0c050c5
MC
3508
3509 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
3510 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
3511 req.mask = cpu_to_le32(vnic->rx_mask);
3512 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3513}
3514
3515#ifdef CONFIG_RFS_ACCEL
3516static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
3517 struct bnxt_ntuple_filter *fltr)
3518{
3519 struct hwrm_cfa_ntuple_filter_free_input req = {0};
3520
3521 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
3522 req.ntuple_filter_id = fltr->filter_id;
3523 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3524}
3525
3526#define BNXT_NTP_FLTR_FLAGS \
3527 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
3528 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
3529 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
3530 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
3531 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
3532 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
3533 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
3534 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
3535 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
3536 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
3537 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
3538 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
3539 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
c193554e 3540 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
c0c050c5 3541
61aad724
MC
3542#define BNXT_NTP_TUNNEL_FLTR_FLAG \
3543 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
3544
c0c050c5
MC
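/* Program an ntuple (aRFS) filter into the NIC: match on the flow's
 * addresses, ports and protocol, and steer matching packets to the VNIC
 * associated with the target RX queue (fltr->rxq + 1).
 */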
3545static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
3546 struct bnxt_ntuple_filter *fltr)
3547{
3548 int rc = 0;
3549 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
3550 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3551 bp->hwrm_cmd_resp_addr;
3552 struct flow_keys *keys = &fltr->fkeys;
3553 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
3554
3555 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
a54c4d74 3556 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
c0c050c5
MC
3557
3558 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
3559
3560 req.ethertype = htons(ETH_P_IP);
3561 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
c193554e 3562 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
c0c050c5
MC
3563 req.ip_protocol = keys->basic.ip_proto;
3564
dda0e746
MC
3565 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
3566 int i;
3567
3568 req.ethertype = htons(ETH_P_IPV6);
3569 req.ip_addr_type =
3570 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
3571 *(struct in6_addr *)&req.src_ipaddr[0] =
3572 keys->addrs.v6addrs.src;
3573 *(struct in6_addr *)&req.dst_ipaddr[0] =
3574 keys->addrs.v6addrs.dst;
3575 for (i = 0; i < 4; i++) {
3576 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3577 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3578 }
3579 } else {
3580 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
3581 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3582 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
3583 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3584 }
61aad724
MC
3585 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
3586 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
3587 req.tunnel_type =
3588 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
3589 }
c0c050c5
MC
3590
3591 req.src_port = keys->ports.src;
3592 req.src_port_mask = cpu_to_be16(0xffff);
3593 req.dst_port = keys->ports.dst;
3594 req.dst_port_mask = cpu_to_be16(0xffff);
3595
c193554e 3596 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
c0c050c5
MC
3597 mutex_lock(&bp->hwrm_cmd_lock);
3598 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3599 if (!rc)
3600 fltr->filter_id = resp->ntuple_filter_id;
3601 mutex_unlock(&bp->hwrm_cmd_lock);
3602 return rc;
3603}
3604#endif
3605
3606static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
3607 u8 *mac_addr)
3608{
3609 u32 rc = 0;
3610 struct hwrm_cfa_l2_filter_alloc_input req = {0};
3611 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3612
3613 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
dc52c6c7
PS
3614 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
3615 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
3616 req.flags |=
3617 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
c193554e 3618 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
c0c050c5
MC
3619 req.enables =
3620 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
c193554e 3621 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
c0c050c5
MC
3622 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
3623 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
3624 req.l2_addr_mask[0] = 0xff;
3625 req.l2_addr_mask[1] = 0xff;
3626 req.l2_addr_mask[2] = 0xff;
3627 req.l2_addr_mask[3] = 0xff;
3628 req.l2_addr_mask[4] = 0xff;
3629 req.l2_addr_mask[5] = 0xff;
3630
3631 mutex_lock(&bp->hwrm_cmd_lock);
3632 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3633 if (!rc)
3634 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
3635 resp->l2_filter_id;
3636 mutex_unlock(&bp->hwrm_cmd_lock);
3637 return rc;
3638}
3639
3640static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
3641{
3642 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
3643 int rc = 0;
3644
3645 /* Any associated ntuple filters will also be cleared by firmware. */
3646 mutex_lock(&bp->hwrm_cmd_lock);
3647 for (i = 0; i < num_of_vnics; i++) {
3648 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3649
3650 for (j = 0; j < vnic->uc_filter_count; j++) {
3651 struct hwrm_cfa_l2_filter_free_input req = {0};
3652
3653 bnxt_hwrm_cmd_hdr_init(bp, &req,
3654 HWRM_CFA_L2_FILTER_FREE, -1, -1);
3655
3656 req.l2_filter_id = vnic->fw_l2_filter_id[j];
3657
3658 rc = _hwrm_send_message(bp, &req, sizeof(req),
3659 HWRM_CMD_TIMEOUT);
3660 }
3661 vnic->uc_filter_count = 0;
3662 }
3663 mutex_unlock(&bp->hwrm_cmd_lock);
3664
3665 return rc;
3666}
3667
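/* Configure hardware TPA (LRO/GRO aggregation) for a VNIC.  The maximum
 * number of aggregation segments is derived from the MTU and the RX page
 * size and passed to the firmware in log2 units.
 */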
3668static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3669{
3670 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3671 struct hwrm_vnic_tpa_cfg_input req = {0};
3672
3673 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3674
3675 if (tpa_flags) {
3676 u16 mss = bp->dev->mtu - 40;
3677 u32 nsegs, n, segs = 0, flags;
3678
3679 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
3680 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
3681 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
3682 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
3683 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3684 if (tpa_flags & BNXT_FLAG_GRO)
3685 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
3686
3687 req.flags = cpu_to_le32(flags);
3688
3689 req.enables =
3690 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
c193554e
MC
3691 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
3692 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
c0c050c5
MC
3693
 3694 /* The number of segs is in log2 units, and the first packet
 3695 * is not included as part of these units.
 3696 */
2839f28b
MC
3697 if (mss <= BNXT_RX_PAGE_SIZE) {
3698 n = BNXT_RX_PAGE_SIZE / mss;
c0c050c5
MC
3699 nsegs = (MAX_SKB_FRAGS - 1) * n;
3700 } else {
2839f28b
MC
3701 n = mss / BNXT_RX_PAGE_SIZE;
3702 if (mss & (BNXT_RX_PAGE_SIZE - 1))
c0c050c5
MC
3703 n++;
3704 nsegs = (MAX_SKB_FRAGS - n) / n;
3705 }
3706
3707 segs = ilog2(nsegs);
3708 req.max_agg_segs = cpu_to_le16(segs);
3709 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
c193554e
MC
3710
3711 req.min_agg_len = cpu_to_le32(512);
c0c050c5
MC
3712 }
3713 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3714
3715 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3716}
3717
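/* Program the RSS configuration for a VNIC: hash type, the indirection
 * table mapping hash buckets to ring group IDs, and the hash key.
 */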
3718static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
3719{
3720 u32 i, j, max_rings;
3721 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3722 struct hwrm_vnic_rss_cfg_input req = {0};
3723
94ce9caa 3724 if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
c0c050c5
MC
3725 return 0;
3726
3727 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
3728 if (set_rss) {
87da7f79 3729 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
dc52c6c7
PS
3730 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
3731 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3732 max_rings = bp->rx_nr_rings - 1;
3733 else
3734 max_rings = bp->rx_nr_rings;
3735 } else {
c0c050c5 3736 max_rings = 1;
dc52c6c7 3737 }
c0c050c5
MC
3738
3739 /* Fill the RSS indirection table with ring group ids */
3740 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3741 if (j == max_rings)
3742 j = 0;
3743 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3744 }
3745
3746 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3747 req.hash_key_tbl_addr =
3748 cpu_to_le64(vnic->rss_hash_key_dma_addr);
3749 }
94ce9caa 3750 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
c0c050c5
MC
3751 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3752}
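
The indirection-table loop above walks HW_HASH_INDEX_SIZE entries and assigns ring group ids round-robin. A minimal sketch, assuming a 128-entry table and four made-up group ids:

/* Sketch of the round-robin indirection table fill above; table size
 * and group ids are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define HASH_INDEX_SIZE	128	/* assumed HW_HASH_INDEX_SIZE */

int main(void)
{
	uint16_t rss_table[HASH_INDEX_SIZE];
	uint16_t fw_grp_ids[] = { 10, 11, 12, 13 };	/* made-up ids */
	unsigned int max_rings = 4, i, j;

	for (i = 0, j = 0; i < HASH_INDEX_SIZE; i++, j++) {
		if (j == max_rings)
			j = 0;
		rss_table[i] = fw_grp_ids[j];
	}
	/* entries cycle 10, 11, 12, 13, 10, 11, ... */
	printf("entry 5 maps to group %u\n", (unsigned int)rss_table[5]);
	return 0;
}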
3753
3754static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3755{
3756 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3757 struct hwrm_vnic_plcmodes_cfg_input req = {0};
3758
3759 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3760 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3761 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3762 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3763 req.enables =
3764 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3765 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3766 /* thresholds not implemented in firmware yet */
3767 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3768 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3769 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3770 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3771}
3772
94ce9caa
PS
3773static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
3774 u16 ctx_idx)
c0c050c5
MC
3775{
3776 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3777
3778 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3779 req.rss_cos_lb_ctx_id =
94ce9caa 3780 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
c0c050c5
MC
3781
3782 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
94ce9caa 3783 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
c0c050c5
MC
3784}
3785
3786static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3787{
94ce9caa 3788 int i, j;
c0c050c5
MC
3789
3790 for (i = 0; i < bp->nr_vnics; i++) {
3791 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3792
94ce9caa
PS
3793 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
3794 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
3795 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
3796 }
c0c050c5
MC
3797 }
3798 bp->rsscos_nr_ctxs = 0;
3799}
3800
94ce9caa 3801static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
c0c050c5
MC
3802{
3803 int rc;
3804 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3805 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3806 bp->hwrm_cmd_resp_addr;
3807
3808 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3809 -1);
3810
3811 mutex_lock(&bp->hwrm_cmd_lock);
3812 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3813 if (!rc)
94ce9caa 3814 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
c0c050c5
MC
3815 le16_to_cpu(resp->rss_cos_lb_ctx_id);
3816 mutex_unlock(&bp->hwrm_cmd_lock);
3817
3818 return rc;
3819}
3820
a588e458 3821int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
c0c050c5 3822{
b81a90d3 3823 unsigned int ring = 0, grp_idx;
c0c050c5
MC
3824 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3825 struct hwrm_vnic_cfg_input req = {0};
cf6645f8 3826 u16 def_vlan = 0;
c0c050c5
MC
3827
3828 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
dc52c6c7
PS
3829
3830 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
c0c050c5 3831	/* Only RSS support for now; TBD: COS & LB */
dc52c6c7
PS
3832 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
3833 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3834 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
3835 VNIC_CFG_REQ_ENABLES_MRU);
ae10ae74
MC
3836 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
3837 req.rss_rule =
3838 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
3839 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
3840 VNIC_CFG_REQ_ENABLES_MRU);
3841 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
dc52c6c7
PS
3842 } else {
3843 req.rss_rule = cpu_to_le16(0xffff);
3844 }
94ce9caa 3845
dc52c6c7
PS
3846 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
3847 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
94ce9caa
PS
3848 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
3849 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
3850 } else {
3851 req.cos_rule = cpu_to_le16(0xffff);
3852 }
3853
c0c050c5 3854 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
b81a90d3 3855 ring = 0;
c0c050c5 3856 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
b81a90d3 3857 ring = vnic_id - 1;
76595193
PS
3858 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
3859 ring = bp->rx_nr_rings - 1;
c0c050c5 3860
b81a90d3 3861 grp_idx = bp->rx_ring[ring].bnapi->index;
c0c050c5
MC
3862 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3863 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3864
3865 req.lb_rule = cpu_to_le16(0xffff);
3866 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3867 VLAN_HLEN);
3868
cf6645f8
MC
3869#ifdef CONFIG_BNXT_SRIOV
3870 if (BNXT_VF(bp))
3871 def_vlan = bp->vf.vlan;
3872#endif
3873 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
c0c050c5 3874 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
a588e458
MC
3875 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
3876 req.flags |=
3877 cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
c0c050c5
MC
3878
3879 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3880}
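
The MRU programmed above is simply the netdev MTU plus the L2 overhead the VNIC must still accept. A tiny worked example with the standard Ethernet header sizes (the constants here are those standard values, not read from the driver headers):

/* Worked example of the MRU computed above: MTU plus Ethernet header,
 * FCS and one VLAN tag.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int mru = mtu + 14 /* ETH_HLEN */ + 4 /* ETH_FCS_LEN */ +
			   4 /* VLAN_HLEN */;

	printf("mru = %u\n", mru);	/* 1522 for a 1500-byte MTU */
	return 0;
}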
3881
3882static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3883{
3884 u32 rc = 0;
3885
3886 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3887 struct hwrm_vnic_free_input req = {0};
3888
3889 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3890 req.vnic_id =
3891 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3892
3893 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3894 if (rc)
3895 return rc;
3896 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3897 }
3898 return rc;
3899}
3900
3901static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3902{
3903 u16 i;
3904
3905 for (i = 0; i < bp->nr_vnics; i++)
3906 bnxt_hwrm_vnic_free_one(bp, i);
3907}
3908
b81a90d3
MC
3909static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
3910 unsigned int start_rx_ring_idx,
3911 unsigned int nr_rings)
c0c050c5 3912{
b81a90d3
MC
3913 int rc = 0;
3914 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
c0c050c5
MC
3915 struct hwrm_vnic_alloc_input req = {0};
3916 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3917
3918 /* map ring groups to this vnic */
b81a90d3
MC
3919 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
3920 grp_idx = bp->rx_ring[i].bnapi->index;
3921 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
c0c050c5 3922 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
b81a90d3 3923 j, nr_rings);
c0c050c5
MC
3924 break;
3925 }
3926 bp->vnic_info[vnic_id].fw_grp_ids[j] =
b81a90d3 3927 bp->grp_info[grp_idx].fw_grp_id;
c0c050c5
MC
3928 }
3929
94ce9caa
PS
3930 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
3931 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
c0c050c5
MC
3932 if (vnic_id == 0)
3933 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3934
3935 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3936
3937 mutex_lock(&bp->hwrm_cmd_lock);
3938 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3939 if (!rc)
3940 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3941 mutex_unlock(&bp->hwrm_cmd_lock);
3942 return rc;
3943}
3944
8fdefd63
MC
3945static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
3946{
3947 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3948 struct hwrm_vnic_qcaps_input req = {0};
3949 int rc;
3950
3951 if (bp->hwrm_spec_code < 0x10600)
3952 return 0;
3953
3954 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
3955 mutex_lock(&bp->hwrm_cmd_lock);
3956 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3957 if (!rc) {
3958 if (resp->flags &
3959 cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
3960 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
3961 }
3962 mutex_unlock(&bp->hwrm_cmd_lock);
3963 return rc;
3964}
3965
c0c050c5
MC
3966static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
3967{
3968 u16 i;
3969 u32 rc = 0;
3970
3971 mutex_lock(&bp->hwrm_cmd_lock);
3972 for (i = 0; i < bp->rx_nr_rings; i++) {
3973 struct hwrm_ring_grp_alloc_input req = {0};
3974 struct hwrm_ring_grp_alloc_output *resp =
3975 bp->hwrm_cmd_resp_addr;
b81a90d3 3976 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
c0c050c5
MC
3977
3978 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
3979
b81a90d3
MC
3980 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
3981 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
3982 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
3983 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
c0c050c5
MC
3984
3985 rc = _hwrm_send_message(bp, &req, sizeof(req),
3986 HWRM_CMD_TIMEOUT);
3987 if (rc)
3988 break;
3989
b81a90d3
MC
3990 bp->grp_info[grp_idx].fw_grp_id =
3991 le32_to_cpu(resp->ring_group_id);
c0c050c5
MC
3992 }
3993 mutex_unlock(&bp->hwrm_cmd_lock);
3994 return rc;
3995}
3996
3997static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
3998{
3999 u16 i;
4000 u32 rc = 0;
4001 struct hwrm_ring_grp_free_input req = {0};
4002
4003 if (!bp->grp_info)
4004 return 0;
4005
4006 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
4007
4008 mutex_lock(&bp->hwrm_cmd_lock);
4009 for (i = 0; i < bp->cp_nr_rings; i++) {
4010 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
4011 continue;
4012 req.ring_group_id =
4013 cpu_to_le32(bp->grp_info[i].fw_grp_id);
4014
4015 rc = _hwrm_send_message(bp, &req, sizeof(req),
4016 HWRM_CMD_TIMEOUT);
4017 if (rc)
4018 break;
4019 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4020 }
4021 mutex_unlock(&bp->hwrm_cmd_lock);
4022 return rc;
4023}
4024
4025static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4026 struct bnxt_ring_struct *ring,
4027 u32 ring_type, u32 map_index,
4028 u32 stats_ctx_id)
4029{
4030 int rc = 0, err = 0;
4031 struct hwrm_ring_alloc_input req = {0};
4032 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4033 u16 ring_id;
4034
4035 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
4036
4037 req.enables = 0;
4038 if (ring->nr_pages > 1) {
4039 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
4040 /* Page size is in log2 units */
4041 req.page_size = BNXT_PAGE_SHIFT;
4042 req.page_tbl_depth = 1;
4043 } else {
4044 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
4045 }
4046 req.fbo = 0;
4047 /* Association of ring index with doorbell index and MSIX number */
4048 req.logical_id = cpu_to_le16(map_index);
4049
4050 switch (ring_type) {
4051 case HWRM_RING_ALLOC_TX:
4052 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
4053 /* Association of transmit ring with completion ring */
4054 req.cmpl_ring_id =
4055 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
4056 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
4057 req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
4058 req.queue_id = cpu_to_le16(ring->queue_id);
4059 break;
4060 case HWRM_RING_ALLOC_RX:
4061 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4062 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
4063 break;
4064 case HWRM_RING_ALLOC_AGG:
4065 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4066 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
4067 break;
4068 case HWRM_RING_ALLOC_CMPL:
bac9a7e0 4069 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
c0c050c5
MC
4070 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4071 if (bp->flags & BNXT_FLAG_USING_MSIX)
4072 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4073 break;
4074 default:
4075 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
4076 ring_type);
4077 return -1;
4078 }
4079
4080 mutex_lock(&bp->hwrm_cmd_lock);
4081 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4082 err = le16_to_cpu(resp->error_code);
4083 ring_id = le16_to_cpu(resp->ring_id);
4084 mutex_unlock(&bp->hwrm_cmd_lock);
4085
4086 if (rc || err) {
4087 switch (ring_type) {
bac9a7e0 4088 case RING_FREE_REQ_RING_TYPE_L2_CMPL:
c0c050c5
MC
4089 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
4090 rc, err);
4091 return -1;
4092
4093 case RING_FREE_REQ_RING_TYPE_RX:
4094 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
4095 rc, err);
4096 return -1;
4097
4098 case RING_FREE_REQ_RING_TYPE_TX:
4099 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
4100 rc, err);
4101 return -1;
4102
4103 default:
4104 netdev_err(bp->dev, "Invalid ring\n");
4105 return -1;
4106 }
4107 }
4108 ring->fw_ring_id = ring_id;
4109 return rc;
4110}
4111
486b5c22
MC
4112static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
4113{
4114 int rc;
4115
4116 if (BNXT_PF(bp)) {
4117 struct hwrm_func_cfg_input req = {0};
4118
4119 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4120 req.fid = cpu_to_le16(0xffff);
4121 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4122 req.async_event_cr = cpu_to_le16(idx);
4123 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4124 } else {
4125 struct hwrm_func_vf_cfg_input req = {0};
4126
4127 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
4128 req.enables =
4129 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4130 req.async_event_cr = cpu_to_le16(idx);
4131 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4132 }
4133 return rc;
4134}
4135
c0c050c5
MC
4136static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4137{
4138 int i, rc = 0;
4139
edd0c2cc
MC
4140 for (i = 0; i < bp->cp_nr_rings; i++) {
4141 struct bnxt_napi *bnapi = bp->bnapi[i];
4142 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4143 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
c0c050c5 4144
33e52d88 4145 cpr->cp_doorbell = bp->bar1 + i * 0x80;
edd0c2cc
MC
4146 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
4147 INVALID_STATS_CTX_ID);
4148 if (rc)
4149 goto err_out;
edd0c2cc
MC
4150 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
4151 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
486b5c22
MC
4152
4153 if (!i) {
4154 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
4155 if (rc)
4156 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
4157 }
c0c050c5
MC
4158 }
4159
edd0c2cc 4160 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 4161 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 4162 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
b81a90d3
MC
4163 u32 map_idx = txr->bnapi->index;
4164 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
c0c050c5 4165
b81a90d3
MC
4166 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
4167 map_idx, fw_stats_ctx);
edd0c2cc
MC
4168 if (rc)
4169 goto err_out;
b81a90d3 4170 txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
c0c050c5
MC
4171 }
4172
edd0c2cc 4173 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 4174 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 4175 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3 4176 u32 map_idx = rxr->bnapi->index;
c0c050c5 4177
b81a90d3
MC
4178 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
4179 map_idx, INVALID_STATS_CTX_ID);
edd0c2cc
MC
4180 if (rc)
4181 goto err_out;
b81a90d3 4182 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
edd0c2cc 4183 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
b81a90d3 4184 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
4185 }
4186
4187 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4188 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 4189 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
c0c050c5
MC
4190 struct bnxt_ring_struct *ring =
4191 &rxr->rx_agg_ring_struct;
b81a90d3
MC
4192 u32 grp_idx = rxr->bnapi->index;
4193 u32 map_idx = grp_idx + bp->rx_nr_rings;
c0c050c5
MC
4194
4195 rc = hwrm_ring_alloc_send_msg(bp, ring,
4196 HWRM_RING_ALLOC_AGG,
b81a90d3 4197 map_idx,
c0c050c5
MC
4198 INVALID_STATS_CTX_ID);
4199 if (rc)
4200 goto err_out;
4201
b81a90d3 4202 rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
c0c050c5
MC
4203 writel(DB_KEY_RX | rxr->rx_agg_prod,
4204 rxr->rx_agg_doorbell);
b81a90d3 4205 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
4206 }
4207 }
4208err_out:
4209 return rc;
4210}
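
Every doorbell assignment above follows the same pattern: a 0x80-byte stride into BAR1 indexed by the ring's logical id. A small sketch of that offset math, using a made-up mapped base address:

/* Sketch of the doorbell layout used above: one 0x80-byte window per
 * logical ring id, carved out of BAR1.  The base address is invented.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t bar1_base = 0xf0000000;	/* assumed ioremap()'d BAR1 */
	unsigned int map_idx;

	for (map_idx = 0; map_idx < 4; map_idx++)
		printf("ring %u doorbell at %#lx\n", map_idx,
		       (unsigned long)(bar1_base + map_idx * 0x80));
	return 0;
}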
4211
4212static int hwrm_ring_free_send_msg(struct bnxt *bp,
4213 struct bnxt_ring_struct *ring,
4214 u32 ring_type, int cmpl_ring_id)
4215{
4216 int rc;
4217 struct hwrm_ring_free_input req = {0};
4218 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
4219 u16 error_code;
4220
74608fc9 4221 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
c0c050c5
MC
4222 req.ring_type = ring_type;
4223 req.ring_id = cpu_to_le16(ring->fw_ring_id);
4224
4225 mutex_lock(&bp->hwrm_cmd_lock);
4226 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4227 error_code = le16_to_cpu(resp->error_code);
4228 mutex_unlock(&bp->hwrm_cmd_lock);
4229
4230 if (rc || error_code) {
4231 switch (ring_type) {
bac9a7e0 4232 case RING_FREE_REQ_RING_TYPE_L2_CMPL:
c0c050c5
MC
4233 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
4234 rc);
4235 return rc;
4236 case RING_FREE_REQ_RING_TYPE_RX:
4237 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
4238 rc);
4239 return rc;
4240 case RING_FREE_REQ_RING_TYPE_TX:
4241 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
4242 rc);
4243 return rc;
4244 default:
4245 netdev_err(bp->dev, "Invalid ring\n");
4246 return -1;
4247 }
4248 }
4249 return 0;
4250}
4251
edd0c2cc 4252static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 4253{
edd0c2cc 4254 int i;
c0c050c5
MC
4255
4256 if (!bp->bnapi)
edd0c2cc 4257 return;
c0c050c5 4258
edd0c2cc 4259 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 4260 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
edd0c2cc 4261 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
b81a90d3
MC
4262 u32 grp_idx = txr->bnapi->index;
4263 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
edd0c2cc
MC
4264
4265 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4266 hwrm_ring_free_send_msg(bp, ring,
4267 RING_FREE_REQ_RING_TYPE_TX,
4268 close_path ? cmpl_ring_id :
4269 INVALID_HW_RING_ID);
4270 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
4271 }
4272 }
4273
edd0c2cc 4274 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 4275 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 4276 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
b81a90d3
MC
4277 u32 grp_idx = rxr->bnapi->index;
4278 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
edd0c2cc
MC
4279
4280 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4281 hwrm_ring_free_send_msg(bp, ring,
4282 RING_FREE_REQ_RING_TYPE_RX,
4283 close_path ? cmpl_ring_id :
4284 INVALID_HW_RING_ID);
4285 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
4286 bp->grp_info[grp_idx].rx_fw_ring_id =
4287 INVALID_HW_RING_ID;
c0c050c5
MC
4288 }
4289 }
4290
edd0c2cc 4291 for (i = 0; i < bp->rx_nr_rings; i++) {
b6ab4b01 4292 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
edd0c2cc 4293 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
b81a90d3
MC
4294 u32 grp_idx = rxr->bnapi->index;
4295 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
edd0c2cc
MC
4296
4297 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4298 hwrm_ring_free_send_msg(bp, ring,
4299 RING_FREE_REQ_RING_TYPE_RX,
4300 close_path ? cmpl_ring_id :
4301 INVALID_HW_RING_ID);
4302 ring->fw_ring_id = INVALID_HW_RING_ID;
b81a90d3
MC
4303 bp->grp_info[grp_idx].agg_fw_ring_id =
4304 INVALID_HW_RING_ID;
c0c050c5
MC
4305 }
4306 }
4307
9d8bc097
MC
 4308	/* The completion rings are about to be freed. After that, the
 4309	 * IRQ doorbell will not work anymore, so we need to disable the
 4310	 * IRQ here.
4311 */
4312 bnxt_disable_int_sync(bp);
4313
edd0c2cc
MC
4314 for (i = 0; i < bp->cp_nr_rings; i++) {
4315 struct bnxt_napi *bnapi = bp->bnapi[i];
4316 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4317 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4318
4319 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4320 hwrm_ring_free_send_msg(bp, ring,
bac9a7e0 4321 RING_FREE_REQ_RING_TYPE_L2_CMPL,
edd0c2cc
MC
4322 INVALID_HW_RING_ID);
4323 ring->fw_ring_id = INVALID_HW_RING_ID;
4324 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
4325 }
4326 }
c0c050c5
MC
4327}
4328
391be5c2
MC
4329/* Caller must hold bp->hwrm_cmd_lock */
4330int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
4331{
4332 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4333 struct hwrm_func_qcfg_input req = {0};
4334 int rc;
4335
4336 if (bp->hwrm_spec_code < 0x10601)
4337 return 0;
4338
4339 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4340 req.fid = cpu_to_le16(fid);
4341 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4342 if (!rc)
4343 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
4344
4345 return rc;
4346}
4347
d1e7925e 4348static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
391be5c2
MC
4349{
4350 struct hwrm_func_cfg_input req = {0};
4351 int rc;
4352
4353 if (bp->hwrm_spec_code < 0x10601)
4354 return 0;
4355
4356 if (BNXT_VF(bp))
4357 return 0;
4358
4359 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4360 req.fid = cpu_to_le16(0xffff);
4361 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
4362 req.num_tx_rings = cpu_to_le16(*tx_rings);
4363 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4364 if (rc)
4365 return rc;
4366
4367 mutex_lock(&bp->hwrm_cmd_lock);
4368 rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
4369 mutex_unlock(&bp->hwrm_cmd_lock);
4370 return rc;
4371}
4372
bb053f52
MC
4373static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
4374 u32 buf_tmrs, u16 flags,
4375 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4376{
4377 req->flags = cpu_to_le16(flags);
4378 req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
4379 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
4380 req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
4381 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
4382 /* Minimum time between 2 interrupts set to buf_tmr x 2 */
4383 req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
4384 req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
4385 req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
4386}
4387
c0c050c5
MC
4388int bnxt_hwrm_set_coal(struct bnxt *bp)
4389{
4390 int i, rc = 0;
dfc9c94a
MC
4391 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
4392 req_tx = {0}, *req;
c0c050c5
MC
4393 u16 max_buf, max_buf_irq;
4394 u16 buf_tmr, buf_tmr_irq;
4395 u32 flags;
4396
dfc9c94a
MC
4397 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
4398 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4399 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
4400 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
c0c050c5 4401
dfb5b894
MC
4402 /* Each rx completion (2 records) should be DMAed immediately.
4403 * DMA 1/4 of the completion buffers at a time.
4404 */
4405 max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
c0c050c5
MC
4406 /* max_buf must not be zero */
4407 max_buf = clamp_t(u16, max_buf, 1, 63);
dfb5b894
MC
4408 max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
4409 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
4410 /* buf timer set to 1/4 of interrupt timer */
4411 buf_tmr = max_t(u16, buf_tmr / 4, 1);
4412 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
4413 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
c0c050c5
MC
4414
4415 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4416
4417 /* RING_IDLE generates more IRQs for lower latency. Enable it only
4418 * if coal_ticks is less than 25 us.
4419 */
dfb5b894 4420 if (bp->rx_coal_ticks < 25)
c0c050c5
MC
4421 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
4422
bb053f52 4423 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
dfc9c94a
MC
4424 buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
4425
4426 /* max_buf must not be zero */
4427 max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
4428 max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
4429 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
4430 /* buf timer set to 1/4 of interrupt timer */
4431 buf_tmr = max_t(u16, buf_tmr / 4, 1);
4432 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
4433 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4434
4435 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4436 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4437 buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
c0c050c5
MC
4438
4439 mutex_lock(&bp->hwrm_cmd_lock);
4440 for (i = 0; i < bp->cp_nr_rings; i++) {
dfc9c94a 4441 struct bnxt_napi *bnapi = bp->bnapi[i];
c0c050c5 4442
dfc9c94a
MC
4443 req = &req_rx;
4444 if (!bnapi->rx_ring)
4445 req = &req_tx;
4446 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
4447
4448 rc = _hwrm_send_message(bp, req, sizeof(*req),
c0c050c5
MC
4449 HWRM_CMD_TIMEOUT);
4450 if (rc)
4451 break;
4452 }
4453 mutex_unlock(&bp->hwrm_cmd_lock);
4454 return rc;
4455}
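
bnxt_hwrm_set_coal() packs the normal and during-interrupt values into one u32 (low and high 16 bits) before bnxt_hwrm_set_coal_params() splits them back out. A short sketch of that round trip with arbitrary sample numbers:

/* Sketch of the 16:16 packing used when calling
 * bnxt_hwrm_set_coal_params(): the during-interrupt value rides in the
 * upper half, the normal value in the lower half.  Sample numbers only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t max_buf = 12, max_buf_irq = 63;
	uint32_t packed = (uint32_t)max_buf_irq << 16 | max_buf;

	printf("num_cmpl_dma_aggr            = %u\n",
	       (unsigned int)(uint16_t)packed);
	printf("num_cmpl_dma_aggr_during_int = %u\n",
	       (unsigned int)(packed >> 16));
	return 0;
}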
4456
4457static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
4458{
4459 int rc = 0, i;
4460 struct hwrm_stat_ctx_free_input req = {0};
4461
4462 if (!bp->bnapi)
4463 return 0;
4464
3e8060fa
PS
4465 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4466 return 0;
4467
c0c050c5
MC
4468 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
4469
4470 mutex_lock(&bp->hwrm_cmd_lock);
4471 for (i = 0; i < bp->cp_nr_rings; i++) {
4472 struct bnxt_napi *bnapi = bp->bnapi[i];
4473 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4474
4475 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
4476 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
4477
4478 rc = _hwrm_send_message(bp, &req, sizeof(req),
4479 HWRM_CMD_TIMEOUT);
4480 if (rc)
4481 break;
4482
4483 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4484 }
4485 }
4486 mutex_unlock(&bp->hwrm_cmd_lock);
4487 return rc;
4488}
4489
4490static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
4491{
4492 int rc = 0, i;
4493 struct hwrm_stat_ctx_alloc_input req = {0};
4494 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4495
3e8060fa
PS
4496 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4497 return 0;
4498
c0c050c5
MC
4499 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
4500
51f30785 4501 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
c0c050c5
MC
4502
4503 mutex_lock(&bp->hwrm_cmd_lock);
4504 for (i = 0; i < bp->cp_nr_rings; i++) {
4505 struct bnxt_napi *bnapi = bp->bnapi[i];
4506 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4507
4508 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
4509
4510 rc = _hwrm_send_message(bp, &req, sizeof(req),
4511 HWRM_CMD_TIMEOUT);
4512 if (rc)
4513 break;
4514
4515 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
4516
4517 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
4518 }
4519 mutex_unlock(&bp->hwrm_cmd_lock);
89aa8445 4520 return rc;
c0c050c5
MC
4521}
4522
cf6645f8
MC
4523static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
4524{
4525 struct hwrm_func_qcfg_input req = {0};
567b2abe 4526 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
cf6645f8
MC
4527 int rc;
4528
4529 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4530 req.fid = cpu_to_le16(0xffff);
4531 mutex_lock(&bp->hwrm_cmd_lock);
4532 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4533 if (rc)
4534 goto func_qcfg_exit;
4535
4536#ifdef CONFIG_BNXT_SRIOV
4537 if (BNXT_VF(bp)) {
cf6645f8
MC
4538 struct bnxt_vf_info *vf = &bp->vf;
4539
4540 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
4541 }
4542#endif
9e54e322
DK
4543 if (BNXT_PF(bp)) {
4544 u16 flags = le16_to_cpu(resp->flags);
4545
4546 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
4547 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED))
4548 bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
4549 if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)
4550 bp->flags |= BNXT_FLAG_MULTI_HOST;
4551 }
bc39f885 4552
567b2abe
SB
4553 switch (resp->port_partition_type) {
4554 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
4555 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
4556 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
4557 bp->port_partition_type = resp->port_partition_type;
4558 break;
4559 }
cf6645f8
MC
4560
4561func_qcfg_exit:
4562 mutex_unlock(&bp->hwrm_cmd_lock);
4563 return rc;
4564}
4565
7b08f661 4566static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5
MC
4567{
4568 int rc = 0;
4569 struct hwrm_func_qcaps_input req = {0};
4570 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4571
4572 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
4573 req.fid = cpu_to_le16(0xffff);
4574
4575 mutex_lock(&bp->hwrm_cmd_lock);
4576 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4577 if (rc)
4578 goto hwrm_func_qcaps_exit;
4579
e4060d30
MC
4580 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
4581 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
4582 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
4583 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
4584
7cc5a20e
MC
4585 bp->tx_push_thresh = 0;
4586 if (resp->flags &
4587 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
4588 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
4589
c0c050c5
MC
4590 if (BNXT_PF(bp)) {
4591 struct bnxt_pf_info *pf = &bp->pf;
4592
4593 pf->fw_fid = le16_to_cpu(resp->fid);
4594 pf->port_id = le16_to_cpu(resp->port_id);
87027db1 4595 bp->dev->dev_port = pf->port_id;
11f15ed3 4596 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
bdd4347b 4597 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
c0c050c5
MC
4598 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4599 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4600 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
c0c050c5 4601 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
b72d4a68
MC
4602 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4603 if (!pf->max_hw_ring_grps)
4604 pf->max_hw_ring_grps = pf->max_tx_rings;
c0c050c5
MC
4605 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4606 pf->max_vnics = le16_to_cpu(resp->max_vnics);
4607 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4608 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
4609 pf->max_vfs = le16_to_cpu(resp->max_vfs);
4610 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
4611 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
4612 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
4613 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
4614 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
4615 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
c1ef146a
MC
4616 if (resp->flags &
4617 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
4618 bp->flags |= BNXT_FLAG_WOL_CAP;
c0c050c5 4619 } else {
379a80a1 4620#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
4621 struct bnxt_vf_info *vf = &bp->vf;
4622
4623 vf->fw_fid = le16_to_cpu(resp->fid);
c0c050c5
MC
4624
4625 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4626 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4627 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4628 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
b72d4a68
MC
4629 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4630 if (!vf->max_hw_ring_grps)
4631 vf->max_hw_ring_grps = vf->max_tx_rings;
c0c050c5
MC
4632 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4633 vf->max_vnics = le16_to_cpu(resp->max_vnics);
4634 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7cc5a20e
MC
4635
4636 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
001154eb
MC
4637 mutex_unlock(&bp->hwrm_cmd_lock);
4638
4639 if (is_valid_ether_addr(vf->mac_addr)) {
7cc5a20e
MC
 4640			/* overwrite netdev dev_addr with admin VF MAC */
4641 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
001154eb 4642 } else {
1faaa78f 4643 eth_hw_addr_random(bp->dev);
001154eb
MC
4644 rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
4645 }
4646 return rc;
379a80a1 4647#endif
c0c050c5
MC
4648 }
4649
c0c050c5
MC
4650hwrm_func_qcaps_exit:
4651 mutex_unlock(&bp->hwrm_cmd_lock);
4652 return rc;
4653}
4654
4655static int bnxt_hwrm_func_reset(struct bnxt *bp)
4656{
4657 struct hwrm_func_reset_input req = {0};
4658
4659 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
4660 req.enables = 0;
4661
4662 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
4663}
4664
4665static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
4666{
4667 int rc = 0;
4668 struct hwrm_queue_qportcfg_input req = {0};
4669 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
4670 u8 i, *qptr;
4671
4672 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
4673
4674 mutex_lock(&bp->hwrm_cmd_lock);
4675 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4676 if (rc)
4677 goto qportcfg_exit;
4678
4679 if (!resp->max_configurable_queues) {
4680 rc = -EINVAL;
4681 goto qportcfg_exit;
4682 }
4683 bp->max_tc = resp->max_configurable_queues;
87c374de 4684 bp->max_lltc = resp->max_configurable_lossless_queues;
c0c050c5
MC
4685 if (bp->max_tc > BNXT_MAX_QUEUE)
4686 bp->max_tc = BNXT_MAX_QUEUE;
4687
441cabbb
MC
4688 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
4689 bp->max_tc = 1;
4690
87c374de
MC
4691 if (bp->max_lltc > bp->max_tc)
4692 bp->max_lltc = bp->max_tc;
4693
c0c050c5
MC
4694 qptr = &resp->queue_id0;
4695 for (i = 0; i < bp->max_tc; i++) {
4696 bp->q_info[i].queue_id = *qptr++;
4697 bp->q_info[i].queue_profile = *qptr++;
4698 }
4699
4700qportcfg_exit:
4701 mutex_unlock(&bp->hwrm_cmd_lock);
4702 return rc;
4703}
4704
4705static int bnxt_hwrm_ver_get(struct bnxt *bp)
4706{
4707 int rc;
4708 struct hwrm_ver_get_input req = {0};
4709 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
e605db80 4710 u32 dev_caps_cfg;
c0c050c5 4711
e6ef2699 4712 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
c0c050c5
MC
4713 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
4714 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
4715 req.hwrm_intf_min = HWRM_VERSION_MINOR;
4716 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
4717 mutex_lock(&bp->hwrm_cmd_lock);
4718 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4719 if (rc)
4720 goto hwrm_ver_get_exit;
4721
4722 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
4723
11f15ed3
MC
4724 bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
4725 resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
c193554e
MC
4726 if (resp->hwrm_intf_maj < 1) {
4727 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
c0c050c5 4728 resp->hwrm_intf_maj, resp->hwrm_intf_min,
c193554e
MC
4729 resp->hwrm_intf_upd);
4730 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5 4731 }
3ebf6f0a 4732 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
c0c050c5
MC
4733 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
4734 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
4735
ff4fe81d
MC
4736 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
4737 if (!bp->hwrm_cmd_timeout)
4738 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
4739
e6ef2699
MC
4740 if (resp->hwrm_intf_maj >= 1)
4741 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
4742
659c805c 4743 bp->chip_num = le16_to_cpu(resp->chip_num);
3e8060fa
PS
4744 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
4745 !resp->chip_metal)
4746 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
659c805c 4747
e605db80
DK
4748 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
4749 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
4750 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
4751 bp->flags |= BNXT_FLAG_SHORT_CMD;
4752
c0c050c5
MC
4753hwrm_ver_get_exit:
4754 mutex_unlock(&bp->hwrm_cmd_lock);
4755 return rc;
4756}
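
bp->hwrm_spec_code built above is compared against constants like 0x10400 and 0x10601 elsewhere in this file. The sketch below shows how a version triple encodes into that value; the version numbers used are samples only.

/* Sketch of the bp->hwrm_spec_code encoding built above
 * (major << 16 | minor << 8 | update).
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t spec_code(uint8_t maj, uint8_t min, uint8_t upd)
{
	return (uint32_t)maj << 16 | (uint32_t)min << 8 | upd;
}

int main(void)
{
	printf("1.6.1 -> %#x\n", (unsigned int)spec_code(1, 6, 1));	/* 0x10601 */
	printf("1.4.0 -> %#x\n", (unsigned int)spec_code(1, 4, 0));	/* 0x10400 */
	printf("1.6.1 >= 0x10400? %s\n",
	       spec_code(1, 6, 1) >= 0x10400 ? "yes" : "no");
	return 0;
}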
4757
5ac67d8b
RS
4758int bnxt_hwrm_fw_set_time(struct bnxt *bp)
4759{
878786d9 4760#if IS_ENABLED(CONFIG_RTC_LIB)
5ac67d8b
RS
4761 struct hwrm_fw_set_time_input req = {0};
4762 struct rtc_time tm;
4763 struct timeval tv;
4764
4765 if (bp->hwrm_spec_code < 0x10400)
4766 return -EOPNOTSUPP;
4767
4768 do_gettimeofday(&tv);
4769 rtc_time_to_tm(tv.tv_sec, &tm);
4770 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
4771 req.year = cpu_to_le16(1900 + tm.tm_year);
4772 req.month = 1 + tm.tm_mon;
4773 req.day = tm.tm_mday;
4774 req.hour = tm.tm_hour;
4775 req.minute = tm.tm_min;
4776 req.second = tm.tm_sec;
4777 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
878786d9
RS
4778#else
4779 return -EOPNOTSUPP;
4780#endif
5ac67d8b
RS
4781}
4782
3bdf56c4
MC
4783static int bnxt_hwrm_port_qstats(struct bnxt *bp)
4784{
4785 int rc;
4786 struct bnxt_pf_info *pf = &bp->pf;
4787 struct hwrm_port_qstats_input req = {0};
4788
4789 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
4790 return 0;
4791
4792 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
4793 req.port_id = cpu_to_le16(pf->port_id);
4794 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
4795 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
4796 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4797 return rc;
4798}
4799
c0c050c5
MC
4800static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
4801{
4802 if (bp->vxlan_port_cnt) {
4803 bnxt_hwrm_tunnel_dst_port_free(
4804 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
4805 }
4806 bp->vxlan_port_cnt = 0;
4807 if (bp->nge_port_cnt) {
4808 bnxt_hwrm_tunnel_dst_port_free(
4809 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
4810 }
4811 bp->nge_port_cnt = 0;
4812}
4813
4814static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
4815{
4816 int rc, i;
4817 u32 tpa_flags = 0;
4818
4819 if (set_tpa)
4820 tpa_flags = bp->flags & BNXT_FLAG_TPA;
4821 for (i = 0; i < bp->nr_vnics; i++) {
4822 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
4823 if (rc) {
4824 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
23e12c89 4825 i, rc);
c0c050c5
MC
4826 return rc;
4827 }
4828 }
4829 return 0;
4830}
4831
4832static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
4833{
4834 int i;
4835
4836 for (i = 0; i < bp->nr_vnics; i++)
4837 bnxt_hwrm_vnic_set_rss(bp, i, false);
4838}
4839
4840static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
4841 bool irq_re_init)
4842{
4843 if (bp->vnic_info) {
4844 bnxt_hwrm_clear_vnic_filter(bp);
 4845		/* clear all RSS settings before freeing the vnic ctx */
4846 bnxt_hwrm_clear_vnic_rss(bp);
4847 bnxt_hwrm_vnic_ctx_free(bp);
 4848		/* before freeing the vnic, undo the vnic tpa settings */
4849 if (bp->flags & BNXT_FLAG_TPA)
4850 bnxt_set_tpa(bp, false);
4851 bnxt_hwrm_vnic_free(bp);
4852 }
4853 bnxt_hwrm_ring_free(bp, close_path);
4854 bnxt_hwrm_ring_grp_free(bp);
4855 if (irq_re_init) {
4856 bnxt_hwrm_stat_ctx_free(bp);
4857 bnxt_hwrm_free_tunnel_ports(bp);
4858 }
4859}
4860
4861static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
4862{
ae10ae74 4863 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
c0c050c5
MC
4864 int rc;
4865
ae10ae74
MC
4866 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
4867 goto skip_rss_ctx;
4868
c0c050c5 4869 /* allocate context for vnic */
94ce9caa 4870 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
c0c050c5
MC
4871 if (rc) {
4872 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4873 vnic_id, rc);
4874 goto vnic_setup_err;
4875 }
4876 bp->rsscos_nr_ctxs++;
4877
94ce9caa
PS
4878 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
4879 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
4880 if (rc) {
4881 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
4882 vnic_id, rc);
4883 goto vnic_setup_err;
4884 }
4885 bp->rsscos_nr_ctxs++;
4886 }
4887
ae10ae74 4888skip_rss_ctx:
c0c050c5
MC
4889 /* configure default vnic, ring grp */
4890 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
4891 if (rc) {
4892 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
4893 vnic_id, rc);
4894 goto vnic_setup_err;
4895 }
4896
4897 /* Enable RSS hashing on vnic */
4898 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
4899 if (rc) {
4900 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
4901 vnic_id, rc);
4902 goto vnic_setup_err;
4903 }
4904
4905 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4906 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
4907 if (rc) {
4908 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
4909 vnic_id, rc);
4910 }
4911 }
4912
4913vnic_setup_err:
4914 return rc;
4915}
4916
4917static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
4918{
4919#ifdef CONFIG_RFS_ACCEL
4920 int i, rc = 0;
4921
4922 for (i = 0; i < bp->rx_nr_rings; i++) {
ae10ae74 4923 struct bnxt_vnic_info *vnic;
c0c050c5
MC
4924 u16 vnic_id = i + 1;
4925 u16 ring_id = i;
4926
4927 if (vnic_id >= bp->nr_vnics)
4928 break;
4929
ae10ae74
MC
4930 vnic = &bp->vnic_info[vnic_id];
4931 vnic->flags |= BNXT_VNIC_RFS_FLAG;
4932 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
4933 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
b81a90d3 4934 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
c0c050c5
MC
4935 if (rc) {
4936 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4937 vnic_id, rc);
4938 break;
4939 }
4940 rc = bnxt_setup_vnic(bp, vnic_id);
4941 if (rc)
4942 break;
4943 }
4944 return rc;
4945#else
4946 return 0;
4947#endif
4948}
4949
17c71ac3
MC
4950/* Allow PF and VF with default VLAN to be in promiscuous mode */
4951static bool bnxt_promisc_ok(struct bnxt *bp)
4952{
4953#ifdef CONFIG_BNXT_SRIOV
4954 if (BNXT_VF(bp) && !bp->vf.vlan)
4955 return false;
4956#endif
4957 return true;
4958}
4959
dc52c6c7
PS
4960static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
4961{
4962 unsigned int rc = 0;
4963
4964 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
4965 if (rc) {
4966 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
4967 rc);
4968 return rc;
4969 }
4970
4971 rc = bnxt_hwrm_vnic_cfg(bp, 1);
4972 if (rc) {
 4973		netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
4974 rc);
4975 return rc;
4976 }
4977 return rc;
4978}
4979
b664f008 4980static int bnxt_cfg_rx_mode(struct bnxt *);
7d2837dd 4981static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
b664f008 4982
c0c050c5
MC
4983static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
4984{
7d2837dd 4985 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
c0c050c5 4986 int rc = 0;
76595193 4987 unsigned int rx_nr_rings = bp->rx_nr_rings;
c0c050c5
MC
4988
4989 if (irq_re_init) {
4990 rc = bnxt_hwrm_stat_ctx_alloc(bp);
4991 if (rc) {
4992 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
4993 rc);
4994 goto err_out;
4995 }
4996 }
4997
4998 rc = bnxt_hwrm_ring_alloc(bp);
4999 if (rc) {
5000 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
5001 goto err_out;
5002 }
5003
5004 rc = bnxt_hwrm_ring_grp_alloc(bp);
5005 if (rc) {
5006 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
5007 goto err_out;
5008 }
5009
76595193
PS
5010 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5011 rx_nr_rings--;
5012
c0c050c5 5013 /* default vnic 0 */
76595193 5014 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
c0c050c5
MC
5015 if (rc) {
5016 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
5017 goto err_out;
5018 }
5019
5020 rc = bnxt_setup_vnic(bp, 0);
5021 if (rc)
5022 goto err_out;
5023
5024 if (bp->flags & BNXT_FLAG_RFS) {
5025 rc = bnxt_alloc_rfs_vnics(bp);
5026 if (rc)
5027 goto err_out;
5028 }
5029
5030 if (bp->flags & BNXT_FLAG_TPA) {
5031 rc = bnxt_set_tpa(bp, true);
5032 if (rc)
5033 goto err_out;
5034 }
5035
5036 if (BNXT_VF(bp))
5037 bnxt_update_vf_mac(bp);
5038
5039 /* Filter for default vnic 0 */
5040 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
5041 if (rc) {
5042 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
5043 goto err_out;
5044 }
7d2837dd 5045 vnic->uc_filter_count = 1;
c0c050c5 5046
7d2837dd 5047 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5 5048
17c71ac3 5049 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7d2837dd
MC
5050 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5051
5052 if (bp->dev->flags & IFF_ALLMULTI) {
5053 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5054 vnic->mc_list_count = 0;
5055 } else {
5056 u32 mask = 0;
5057
5058 bnxt_mc_list_updated(bp, &mask);
5059 vnic->rx_mask |= mask;
5060 }
c0c050c5 5061
b664f008
MC
5062 rc = bnxt_cfg_rx_mode(bp);
5063 if (rc)
c0c050c5 5064 goto err_out;
c0c050c5
MC
5065
5066 rc = bnxt_hwrm_set_coal(bp);
5067 if (rc)
5068 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
dc52c6c7
PS
5069 rc);
5070
5071 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5072 rc = bnxt_setup_nitroa0_vnic(bp);
5073 if (rc)
5074 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
5075 rc);
5076 }
c0c050c5 5077
cf6645f8
MC
5078 if (BNXT_VF(bp)) {
5079 bnxt_hwrm_func_qcfg(bp);
5080 netdev_update_features(bp->dev);
5081 }
5082
c0c050c5
MC
5083 return 0;
5084
5085err_out:
5086 bnxt_hwrm_resource_free(bp, 0, true);
5087
5088 return rc;
5089}
5090
5091static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
5092{
5093 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
5094 return 0;
5095}
5096
5097static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
5098{
2247925f 5099 bnxt_init_cp_rings(bp);
c0c050c5
MC
5100 bnxt_init_rx_rings(bp);
5101 bnxt_init_tx_rings(bp);
5102 bnxt_init_ring_grps(bp, irq_re_init);
5103 bnxt_init_vnics(bp);
5104
5105 return bnxt_init_chip(bp, irq_re_init);
5106}
5107
c0c050c5
MC
5108static int bnxt_set_real_num_queues(struct bnxt *bp)
5109{
5110 int rc;
5111 struct net_device *dev = bp->dev;
5112
5f449249
MC
5113 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
5114 bp->tx_nr_rings_xdp);
c0c050c5
MC
5115 if (rc)
5116 return rc;
5117
5118 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
5119 if (rc)
5120 return rc;
5121
5122#ifdef CONFIG_RFS_ACCEL
45019a18 5123 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 5124 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
5125#endif
5126
5127 return rc;
5128}
5129
6e6c5a57
MC
5130static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5131 bool shared)
5132{
5133 int _rx = *rx, _tx = *tx;
5134
5135 if (shared) {
5136 *rx = min_t(int, _rx, max);
5137 *tx = min_t(int, _tx, max);
5138 } else {
5139 if (max < 2)
5140 return -ENOMEM;
5141
5142 while (_rx + _tx > max) {
5143 if (_rx > _tx && _rx > 1)
5144 _rx--;
5145 else if (_tx > 1)
5146 _tx--;
5147 }
5148 *rx = _rx;
5149 *tx = _tx;
5150 }
5151 return 0;
5152}
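
In the non-shared case the loop above shaves one ring at a time from whichever side is larger until the total fits the vector budget. A minimal userspace copy of that loop with arbitrary starting counts:

/* Userspace copy of the non-shared trim loop above, with arbitrary
 * starting counts.
 */
#include <stdio.h>

int main(void)
{
	int rx = 8, tx = 8, max = 10;

	while (rx + tx > max) {
		if (rx > tx && rx > 1)
			rx--;
		else if (tx > 1)
			tx--;
	}
	printf("rx=%d tx=%d\n", rx, tx);	/* 5 and 5 for an 8/8 request */
	return 0;
}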
5153
7809592d
MC
5154static void bnxt_setup_msix(struct bnxt *bp)
5155{
5156 const int len = sizeof(bp->irq_tbl[0].name);
5157 struct net_device *dev = bp->dev;
5158 int tcs, i;
5159
5160 tcs = netdev_get_num_tc(dev);
5161 if (tcs > 1) {
d1e7925e 5162 int i, off, count;
7809592d 5163
d1e7925e
MC
5164 for (i = 0; i < tcs; i++) {
5165 count = bp->tx_nr_rings_per_tc;
5166 off = i * count;
5167 netdev_set_tc_queue(dev, i, count, off);
7809592d
MC
5168 }
5169 }
5170
5171 for (i = 0; i < bp->cp_nr_rings; i++) {
5172 char *attr;
5173
5174 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5175 attr = "TxRx";
5176 else if (i < bp->rx_nr_rings)
5177 attr = "rx";
5178 else
5179 attr = "tx";
5180
5181 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
5182 i);
5183 bp->irq_tbl[i].handler = bnxt_msix;
5184 }
5185}
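
When multiple traffic classes are configured, each TC above is given a contiguous block of tx_nr_rings_per_tc queues via netdev_set_tc_queue(). A small sketch of the resulting count/offset pairs, using example values:

/* Sketch of the per-TC queue ranges handed to netdev_set_tc_queue()
 * above; the TC and ring counts are example values.
 */
#include <stdio.h>

int main(void)
{
	int tcs = 3, tx_nr_rings_per_tc = 4, i;

	for (i = 0; i < tcs; i++)
		printf("tc %d: count=%d offset=%d\n",
		       i, tx_nr_rings_per_tc, i * tx_nr_rings_per_tc);
	return 0;
}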
5186
5187static void bnxt_setup_inta(struct bnxt *bp)
5188{
5189 const int len = sizeof(bp->irq_tbl[0].name);
5190
5191 if (netdev_get_num_tc(bp->dev))
5192 netdev_reset_tc(bp->dev);
5193
5194 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
5195 0);
5196 bp->irq_tbl[0].handler = bnxt_inta;
5197}
5198
5199static int bnxt_setup_int_mode(struct bnxt *bp)
5200{
5201 int rc;
5202
5203 if (bp->flags & BNXT_FLAG_USING_MSIX)
5204 bnxt_setup_msix(bp);
5205 else
5206 bnxt_setup_inta(bp);
5207
5208 rc = bnxt_set_real_num_queues(bp);
5209 return rc;
5210}
5211
b7429954 5212#ifdef CONFIG_RFS_ACCEL
8079e8f1
MC
5213static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
5214{
5215#if defined(CONFIG_BNXT_SRIOV)
5216 if (BNXT_VF(bp))
5217 return bp->vf.max_rsscos_ctxs;
5218#endif
5219 return bp->pf.max_rsscos_ctxs;
5220}
5221
5222static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
5223{
5224#if defined(CONFIG_BNXT_SRIOV)
5225 if (BNXT_VF(bp))
5226 return bp->vf.max_vnics;
5227#endif
5228 return bp->pf.max_vnics;
5229}
b7429954 5230#endif
8079e8f1 5231
e4060d30
MC
5232unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
5233{
5234#if defined(CONFIG_BNXT_SRIOV)
5235 if (BNXT_VF(bp))
5236 return bp->vf.max_stat_ctxs;
5237#endif
5238 return bp->pf.max_stat_ctxs;
5239}
5240
a588e458
MC
5241void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
5242{
5243#if defined(CONFIG_BNXT_SRIOV)
5244 if (BNXT_VF(bp))
5245 bp->vf.max_stat_ctxs = max;
5246 else
5247#endif
5248 bp->pf.max_stat_ctxs = max;
5249}
5250
e4060d30
MC
5251unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
5252{
5253#if defined(CONFIG_BNXT_SRIOV)
5254 if (BNXT_VF(bp))
5255 return bp->vf.max_cp_rings;
5256#endif
5257 return bp->pf.max_cp_rings;
5258}
5259
a588e458
MC
5260void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
5261{
5262#if defined(CONFIG_BNXT_SRIOV)
5263 if (BNXT_VF(bp))
5264 bp->vf.max_cp_rings = max;
5265 else
5266#endif
5267 bp->pf.max_cp_rings = max;
5268}
5269
7809592d
MC
5270static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
5271{
5272#if defined(CONFIG_BNXT_SRIOV)
5273 if (BNXT_VF(bp))
68a946bb
MC
5274 return min_t(unsigned int, bp->vf.max_irqs,
5275 bp->vf.max_cp_rings);
7809592d 5276#endif
68a946bb 5277 return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
7809592d
MC
5278}
5279
33c2657e
MC
5280void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
5281{
5282#if defined(CONFIG_BNXT_SRIOV)
5283 if (BNXT_VF(bp))
5284 bp->vf.max_irqs = max_irqs;
5285 else
5286#endif
5287 bp->pf.max_irqs = max_irqs;
5288}
5289
7809592d 5290static int bnxt_init_msix(struct bnxt *bp)
c0c050c5 5291{
01657bcd 5292 int i, total_vecs, rc = 0, min = 1;
7809592d 5293 struct msix_entry *msix_ent;
c0c050c5 5294
7809592d 5295 total_vecs = bnxt_get_max_func_irqs(bp);
c0c050c5
MC
5296 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
5297 if (!msix_ent)
5298 return -ENOMEM;
5299
5300 for (i = 0; i < total_vecs; i++) {
5301 msix_ent[i].entry = i;
5302 msix_ent[i].vector = 0;
5303 }
5304
01657bcd
MC
5305 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
5306 min = 2;
5307
5308 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
c0c050c5
MC
5309 if (total_vecs < 0) {
5310 rc = -ENODEV;
5311 goto msix_setup_exit;
5312 }
5313
5314 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
5315 if (bp->irq_tbl) {
7809592d
MC
5316 for (i = 0; i < total_vecs; i++)
5317 bp->irq_tbl[i].vector = msix_ent[i].vector;
c0c050c5 5318
7809592d 5319 bp->total_irqs = total_vecs;
c0c050c5 5320 /* Trim rings based upon num of vectors allocated */
6e6c5a57 5321 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
01657bcd 5322 total_vecs, min == 1);
6e6c5a57
MC
5323 if (rc)
5324 goto msix_setup_exit;
5325
c0c050c5 5326 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
7809592d
MC
5327 bp->cp_nr_rings = (min == 1) ?
5328 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
5329 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5 5330
c0c050c5
MC
5331 } else {
5332 rc = -ENOMEM;
5333 goto msix_setup_exit;
5334 }
5335 bp->flags |= BNXT_FLAG_USING_MSIX;
5336 kfree(msix_ent);
5337 return 0;
5338
5339msix_setup_exit:
7809592d
MC
5340 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
5341 kfree(bp->irq_tbl);
5342 bp->irq_tbl = NULL;
c0c050c5
MC
5343 pci_disable_msix(bp->pdev);
5344 kfree(msix_ent);
5345 return rc;
5346}
5347
7809592d 5348static int bnxt_init_inta(struct bnxt *bp)
c0c050c5 5349{
c0c050c5 5350 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
7809592d
MC
5351 if (!bp->irq_tbl)
5352 return -ENOMEM;
5353
5354 bp->total_irqs = 1;
c0c050c5
MC
5355 bp->rx_nr_rings = 1;
5356 bp->tx_nr_rings = 1;
5357 bp->cp_nr_rings = 1;
5358 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
01657bcd 5359 bp->flags |= BNXT_FLAG_SHARED_RINGS;
c0c050c5 5360 bp->irq_tbl[0].vector = bp->pdev->irq;
7809592d 5361 return 0;
c0c050c5
MC
5362}
5363
7809592d 5364static int bnxt_init_int_mode(struct bnxt *bp)
c0c050c5
MC
5365{
5366 int rc = 0;
5367
5368 if (bp->flags & BNXT_FLAG_MSIX_CAP)
7809592d 5369 rc = bnxt_init_msix(bp);
c0c050c5 5370
1fa72e29 5371 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
c0c050c5 5372 /* fallback to INTA */
7809592d 5373 rc = bnxt_init_inta(bp);
c0c050c5
MC
5374 }
5375 return rc;
5376}
5377
7809592d
MC
5378static void bnxt_clear_int_mode(struct bnxt *bp)
5379{
5380 if (bp->flags & BNXT_FLAG_USING_MSIX)
5381 pci_disable_msix(bp->pdev);
5382
5383 kfree(bp->irq_tbl);
5384 bp->irq_tbl = NULL;
5385 bp->flags &= ~BNXT_FLAG_USING_MSIX;
5386}
5387
c0c050c5
MC
5388static void bnxt_free_irq(struct bnxt *bp)
5389{
5390 struct bnxt_irq *irq;
5391 int i;
5392
5393#ifdef CONFIG_RFS_ACCEL
5394 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
5395 bp->dev->rx_cpu_rmap = NULL;
5396#endif
5397 if (!bp->irq_tbl)
5398 return;
5399
5400 for (i = 0; i < bp->cp_nr_rings; i++) {
5401 irq = &bp->irq_tbl[i];
5402 if (irq->requested)
5403 free_irq(irq->vector, bp->bnapi[i]);
5404 irq->requested = 0;
5405 }
c0c050c5
MC
5406}
5407
5408static int bnxt_request_irq(struct bnxt *bp)
5409{
b81a90d3 5410 int i, j, rc = 0;
c0c050c5
MC
5411 unsigned long flags = 0;
5412#ifdef CONFIG_RFS_ACCEL
5413 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
5414#endif
5415
5416 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
5417 flags = IRQF_SHARED;
5418
b81a90d3 5419 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
c0c050c5
MC
5420 struct bnxt_irq *irq = &bp->irq_tbl[i];
5421#ifdef CONFIG_RFS_ACCEL
b81a90d3 5422 if (rmap && bp->bnapi[i]->rx_ring) {
c0c050c5
MC
5423 rc = irq_cpu_rmap_add(rmap, irq->vector);
5424 if (rc)
5425 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
b81a90d3
MC
5426 j);
5427 j++;
c0c050c5
MC
5428 }
5429#endif
5430 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5431 bp->bnapi[i]);
5432 if (rc)
5433 break;
5434
5435 irq->requested = 1;
5436 }
5437 return rc;
5438}
5439
5440static void bnxt_del_napi(struct bnxt *bp)
5441{
5442 int i;
5443
5444 if (!bp->bnapi)
5445 return;
5446
5447 for (i = 0; i < bp->cp_nr_rings; i++) {
5448 struct bnxt_napi *bnapi = bp->bnapi[i];
5449
5450 napi_hash_del(&bnapi->napi);
5451 netif_napi_del(&bnapi->napi);
5452 }
e5f6f564
ED
5453 /* We called napi_hash_del() before netif_napi_del(), we need
5454 * to respect an RCU grace period before freeing napi structures.
5455 */
5456 synchronize_net();
c0c050c5
MC
5457}
5458
5459static void bnxt_init_napi(struct bnxt *bp)
5460{
5461 int i;
10bbdaf5 5462 unsigned int cp_nr_rings = bp->cp_nr_rings;
c0c050c5
MC
5463 struct bnxt_napi *bnapi;
5464
5465 if (bp->flags & BNXT_FLAG_USING_MSIX) {
10bbdaf5
PS
5466 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5467 cp_nr_rings--;
5468 for (i = 0; i < cp_nr_rings; i++) {
c0c050c5
MC
5469 bnapi = bp->bnapi[i];
5470 netif_napi_add(bp->dev, &bnapi->napi,
5471 bnxt_poll, 64);
c0c050c5 5472 }
10bbdaf5
PS
5473 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5474 bnapi = bp->bnapi[cp_nr_rings];
5475 netif_napi_add(bp->dev, &bnapi->napi,
5476 bnxt_poll_nitroa0, 64);
10bbdaf5 5477 }
c0c050c5
MC
5478 } else {
5479 bnapi = bp->bnapi[0];
5480 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
c0c050c5
MC
5481 }
5482}
5483
5484static void bnxt_disable_napi(struct bnxt *bp)
5485{
5486 int i;
5487
5488 if (!bp->bnapi)
5489 return;
5490
b356a2e7 5491 for (i = 0; i < bp->cp_nr_rings; i++)
c0c050c5 5492 napi_disable(&bp->bnapi[i]->napi);
c0c050c5
MC
5493}
5494
5495static void bnxt_enable_napi(struct bnxt *bp)
5496{
5497 int i;
5498
5499 for (i = 0; i < bp->cp_nr_rings; i++) {
fa7e2812 5500 bp->bnapi[i]->in_reset = false;
c0c050c5
MC
5501 napi_enable(&bp->bnapi[i]->napi);
5502 }
5503}
5504
7df4ae9f 5505void bnxt_tx_disable(struct bnxt *bp)
c0c050c5
MC
5506{
5507 int i;
c0c050c5
MC
5508 struct bnxt_tx_ring_info *txr;
5509 struct netdev_queue *txq;
5510
b6ab4b01 5511 if (bp->tx_ring) {
c0c050c5 5512 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5513 txr = &bp->tx_ring[i];
c0c050c5 5514 txq = netdev_get_tx_queue(bp->dev, i);
c0c050c5 5515 txr->dev_state = BNXT_DEV_STATE_CLOSING;
c0c050c5
MC
5516 }
5517 }
5518 /* Stop all TX queues */
5519 netif_tx_disable(bp->dev);
5520 netif_carrier_off(bp->dev);
5521}
5522
7df4ae9f 5523void bnxt_tx_enable(struct bnxt *bp)
c0c050c5
MC
5524{
5525 int i;
c0c050c5
MC
5526 struct bnxt_tx_ring_info *txr;
5527 struct netdev_queue *txq;
5528
5529 for (i = 0; i < bp->tx_nr_rings; i++) {
b6ab4b01 5530 txr = &bp->tx_ring[i];
c0c050c5
MC
5531 txq = netdev_get_tx_queue(bp->dev, i);
5532 txr->dev_state = 0;
5533 }
5534 netif_tx_wake_all_queues(bp->dev);
5535 if (bp->link_info.link_up)
5536 netif_carrier_on(bp->dev);
5537}
5538
5539static void bnxt_report_link(struct bnxt *bp)
5540{
5541 if (bp->link_info.link_up) {
5542 const char *duplex;
5543 const char *flow_ctrl;
38a21b34
DK
5544 u32 speed;
5545 u16 fec;
c0c050c5
MC
5546
5547 netif_carrier_on(bp->dev);
5548 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
5549 duplex = "full";
5550 else
5551 duplex = "half";
5552 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
5553 flow_ctrl = "ON - receive & transmit";
5554 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
5555 flow_ctrl = "ON - transmit";
5556 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
5557 flow_ctrl = "ON - receive";
5558 else
5559 flow_ctrl = "none";
5560 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
38a21b34 5561 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
c0c050c5 5562 speed, duplex, flow_ctrl);
170ce013
MC
5563 if (bp->flags & BNXT_FLAG_EEE_CAP)
5564 netdev_info(bp->dev, "EEE is %s\n",
5565 bp->eee.eee_active ? "active" :
5566 "not active");
e70c752f
MC
5567 fec = bp->link_info.fec_cfg;
5568 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
5569 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
5570 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
5571 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
5572 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
c0c050c5
MC
5573 } else {
5574 netif_carrier_off(bp->dev);
5575 netdev_err(bp->dev, "NIC Link is Down\n");
5576 }
5577}
5578
170ce013
MC
5579static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
5580{
5581 int rc = 0;
5582 struct hwrm_port_phy_qcaps_input req = {0};
5583 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
93ed8117 5584 struct bnxt_link_info *link_info = &bp->link_info;
170ce013
MC
5585
5586 if (bp->hwrm_spec_code < 0x10201)
5587 return 0;
5588
5589 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
5590
5591 mutex_lock(&bp->hwrm_cmd_lock);
5592 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5593 if (rc)
5594 goto hwrm_phy_qcaps_exit;
5595
5596 if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
5597 struct ethtool_eee *eee = &bp->eee;
5598 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
5599
5600 bp->flags |= BNXT_FLAG_EEE_CAP;
5601 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5602 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
5603 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
5604 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
5605 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
5606 }
520ad89a
MC
5607 if (resp->supported_speeds_auto_mode)
5608 link_info->support_auto_speeds =
5609 le16_to_cpu(resp->supported_speeds_auto_mode);
170ce013
MC
5610
5611hwrm_phy_qcaps_exit:
5612 mutex_unlock(&bp->hwrm_cmd_lock);
5613 return rc;
5614}
5615
c0c050c5
MC
5616static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5617{
5618 int rc = 0;
5619 struct bnxt_link_info *link_info = &bp->link_info;
5620 struct hwrm_port_phy_qcfg_input req = {0};
5621 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5622 u8 link_up = link_info->link_up;
286ef9d6 5623 u16 diff;
c0c050c5
MC
5624
5625 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
5626
5627 mutex_lock(&bp->hwrm_cmd_lock);
5628 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5629 if (rc) {
5630 mutex_unlock(&bp->hwrm_cmd_lock);
5631 return rc;
5632 }
5633
5634 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
5635 link_info->phy_link_status = resp->link;
5636 link_info->duplex = resp->duplex;
5637 link_info->pause = resp->pause;
5638 link_info->auto_mode = resp->auto_mode;
5639 link_info->auto_pause_setting = resp->auto_pause;
3277360e 5640 link_info->lp_pause = resp->link_partner_adv_pause;
c0c050c5 5641 link_info->force_pause_setting = resp->force_pause;
c193554e 5642 link_info->duplex_setting = resp->duplex;
c0c050c5
MC
5643 if (link_info->phy_link_status == BNXT_LINK_LINK)
5644 link_info->link_speed = le16_to_cpu(resp->link_speed);
5645 else
5646 link_info->link_speed = 0;
5647 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
c0c050c5
MC
5648 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
5649 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
3277360e
MC
5650 link_info->lp_auto_link_speeds =
5651 le16_to_cpu(resp->link_partner_adv_speeds);
c0c050c5
MC
5652 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
5653 link_info->phy_ver[0] = resp->phy_maj;
5654 link_info->phy_ver[1] = resp->phy_min;
5655 link_info->phy_ver[2] = resp->phy_bld;
5656 link_info->media_type = resp->media_type;
03efbec0 5657 link_info->phy_type = resp->phy_type;
11f15ed3 5658 link_info->transceiver = resp->xcvr_pkg_type;
170ce013
MC
5659 link_info->phy_addr = resp->eee_config_phy_addr &
5660 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
42ee18fe 5661 link_info->module_status = resp->module_status;
170ce013
MC
5662
5663 if (bp->flags & BNXT_FLAG_EEE_CAP) {
5664 struct ethtool_eee *eee = &bp->eee;
5665 u16 fw_speeds;
5666
5667 eee->eee_active = 0;
5668 if (resp->eee_config_phy_addr &
5669 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
5670 eee->eee_active = 1;
5671 fw_speeds = le16_to_cpu(
5672 resp->link_partner_adv_eee_link_speed_mask);
5673 eee->lp_advertised =
5674 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5675 }
5676
5677 /* Pull initial EEE config */
5678 if (!chng_link_state) {
5679 if (resp->eee_config_phy_addr &
5680 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
5681 eee->eee_enabled = 1;
c0c050c5 5682
170ce013
MC
5683 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
5684 eee->advertised =
5685 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5686
5687 if (resp->eee_config_phy_addr &
5688 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
5689 __le32 tmr;
5690
5691 eee->tx_lpi_enabled = 1;
5692 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
5693 eee->tx_lpi_timer = le32_to_cpu(tmr) &
5694 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
5695 }
5696 }
5697 }
e70c752f
MC
5698
5699 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
5700 if (bp->hwrm_spec_code >= 0x10504)
5701 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
5702
c0c050c5
MC
5703 /* TODO: need to add more logic to report VF link */
5704 if (chng_link_state) {
5705 if (link_info->phy_link_status == BNXT_LINK_LINK)
5706 link_info->link_up = 1;
5707 else
5708 link_info->link_up = 0;
5709 if (link_up != link_info->link_up)
5710 bnxt_report_link(bp);
5711 } else {
 5712		/* always link down if not required to update link state */
5713 link_info->link_up = 0;
5714 }
5715 mutex_unlock(&bp->hwrm_cmd_lock);
286ef9d6
MC
5716
5717 diff = link_info->support_auto_speeds ^ link_info->advertising;
5718 if ((link_info->support_auto_speeds | diff) !=
5719 link_info->support_auto_speeds) {
5720 /* An advertised speed is no longer supported, so we need to
0eaa24b9
MC
5721 * update the advertisement settings. Caller holds RTNL
5722 * so we can modify link settings.
286ef9d6 5723 */
286ef9d6 5724 link_info->advertising = link_info->support_auto_speeds;
0eaa24b9 5725 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
286ef9d6 5726 bnxt_hwrm_set_link_setting(bp, true, false);
286ef9d6 5727 }
c0c050c5
MC
5728 return 0;
5729}
5730
10289bec
MC
5731static void bnxt_get_port_module_status(struct bnxt *bp)
5732{
5733 struct bnxt_link_info *link_info = &bp->link_info;
5734 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
5735 u8 module_status;
5736
5737 if (bnxt_update_link(bp, true))
5738 return;
5739
5740 module_status = link_info->module_status;
5741 switch (module_status) {
5742 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
5743 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
5744 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
5745 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
5746 bp->pf.port_id);
5747 if (bp->hwrm_spec_code >= 0x10201) {
5748 netdev_warn(bp->dev, "Module part number %s\n",
5749 resp->phy_vendor_partnumber);
5750 }
5751 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
5752 netdev_warn(bp->dev, "TX is disabled\n");
5753 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
5754 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
5755 }
5756}
5757
c0c050c5
MC
5758static void
5759bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
5760{
5761 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
c9ee9516
MC
5762 if (bp->hwrm_spec_code >= 0x10201)
5763 req->auto_pause =
5764 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
c0c050c5
MC
5765 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5766 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
5767 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
49b5c7a1 5768 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
c0c050c5
MC
5769 req->enables |=
5770 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5771 } else {
5772 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5773 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
5774 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5775 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
5776 req->enables |=
5777 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
c9ee9516
MC
5778 if (bp->hwrm_spec_code >= 0x10201) {
5779 req->auto_pause = req->force_pause;
5780 req->enables |= cpu_to_le32(
5781 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5782 }
c0c050c5
MC
5783 }
5784}
5785
5786static void bnxt_hwrm_set_link_common(struct bnxt *bp,
5787 struct hwrm_port_phy_cfg_input *req)
5788{
5789 u8 autoneg = bp->link_info.autoneg;
5790 u16 fw_link_speed = bp->link_info.req_link_speed;
68515a18 5791 u16 advertising = bp->link_info.advertising;
c0c050c5
MC
5792
5793 if (autoneg & BNXT_AUTONEG_SPEED) {
5794 req->auto_mode |=
11f15ed3 5795 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
c0c050c5
MC
5796
5797 req->enables |= cpu_to_le32(
5798 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
5799 req->auto_link_speed_mask = cpu_to_le16(advertising);
5800
5801 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
5802 req->flags |=
5803 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
5804 } else {
5805 req->force_link_speed = cpu_to_le16(fw_link_speed);
5806 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
5807 }
5808
c0c050c5
MC
5809 /* tell chimp that the setting takes effect immediately */
5810 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
5811}
5812
5813int bnxt_hwrm_set_pause(struct bnxt *bp)
5814{
5815 struct hwrm_port_phy_cfg_input req = {0};
5816 int rc;
5817
5818 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5819 bnxt_hwrm_set_pause_common(bp, &req);
5820
5821 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
5822 bp->link_info.force_link_chng)
5823 bnxt_hwrm_set_link_common(bp, &req);
5824
5825 mutex_lock(&bp->hwrm_cmd_lock);
5826 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5827 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
 5828		/* Since changing the pause setting doesn't trigger any link
 5829		 * change event, the driver needs to update the current pause
 5830		 * result upon successful return of the phy_cfg command
5831 */
5832 bp->link_info.pause =
5833 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
5834 bp->link_info.auto_pause_setting = 0;
5835 if (!bp->link_info.force_link_chng)
5836 bnxt_report_link(bp);
5837 }
5838 bp->link_info.force_link_chng = false;
5839 mutex_unlock(&bp->hwrm_cmd_lock);
5840 return rc;
5841}
5842
939f7f0c
MC
5843static void bnxt_hwrm_set_eee(struct bnxt *bp,
5844 struct hwrm_port_phy_cfg_input *req)
5845{
5846 struct ethtool_eee *eee = &bp->eee;
5847
5848 if (eee->eee_enabled) {
5849 u16 eee_speeds;
5850 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
5851
5852 if (eee->tx_lpi_enabled)
5853 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
5854 else
5855 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
5856
5857 req->flags |= cpu_to_le32(flags);
5858 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
5859 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
5860 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
5861 } else {
5862 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
5863 }
5864}
5865
5866int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
c0c050c5
MC
5867{
5868 struct hwrm_port_phy_cfg_input req = {0};
5869
5870 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5871 if (set_pause)
5872 bnxt_hwrm_set_pause_common(bp, &req);
5873
5874 bnxt_hwrm_set_link_common(bp, &req);
939f7f0c
MC
5875
5876 if (set_eee)
5877 bnxt_hwrm_set_eee(bp, &req);
c0c050c5
MC
5878 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5879}
5880
33f7d55f
MC
5881static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
5882{
5883 struct hwrm_port_phy_cfg_input req = {0};
5884
567b2abe 5885 if (!BNXT_SINGLE_PF(bp))
33f7d55f
MC
5886 return 0;
5887
5888 if (pci_num_vf(bp->pdev))
5889 return 0;
5890
5891 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
16d663a6 5892 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
33f7d55f
MC
5893 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5894}
5895
5ad2cbee
MC
5896static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
5897{
5898 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5899 struct hwrm_port_led_qcaps_input req = {0};
5900 struct bnxt_pf_info *pf = &bp->pf;
5901 int rc;
5902
5903 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
5904 return 0;
5905
5906 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
5907 req.port_id = cpu_to_le16(pf->port_id);
5908 mutex_lock(&bp->hwrm_cmd_lock);
5909 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5910 if (rc) {
5911 mutex_unlock(&bp->hwrm_cmd_lock);
5912 return rc;
5913 }
5914 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
5915 int i;
5916
5917 bp->num_leds = resp->num_leds;
5918 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
5919 bp->num_leds);
5920 for (i = 0; i < bp->num_leds; i++) {
5921 struct bnxt_led_info *led = &bp->leds[i];
5922 __le16 caps = led->led_state_caps;
5923
5924 if (!led->led_group_id ||
5925 !BNXT_LED_ALT_BLINK_CAP(caps)) {
5926 bp->num_leds = 0;
5927 break;
5928 }
5929 }
5930 }
5931 mutex_unlock(&bp->hwrm_cmd_lock);
5932 return 0;
5933}
5934
5282db6c
MC
5935int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
5936{
5937 struct hwrm_wol_filter_alloc_input req = {0};
5938 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5939 int rc;
5940
5941 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
5942 req.port_id = cpu_to_le16(bp->pf.port_id);
5943 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
5944 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
5945 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
5946 mutex_lock(&bp->hwrm_cmd_lock);
5947 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5948 if (!rc)
5949 bp->wol_filter_id = resp->wol_filter_id;
5950 mutex_unlock(&bp->hwrm_cmd_lock);
5951 return rc;
5952}
5953
5954int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
5955{
5956 struct hwrm_wol_filter_free_input req = {0};
5957 int rc;
5958
5959 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
5960 req.port_id = cpu_to_le16(bp->pf.port_id);
5961 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
5962 req.wol_filter_id = bp->wol_filter_id;
5963 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5964 return rc;
5965}
5966
c1ef146a
MC
5967static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
5968{
5969 struct hwrm_wol_filter_qcfg_input req = {0};
5970 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5971 u16 next_handle = 0;
5972 int rc;
5973
5974 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
5975 req.port_id = cpu_to_le16(bp->pf.port_id);
5976 req.handle = cpu_to_le16(handle);
5977 mutex_lock(&bp->hwrm_cmd_lock);
5978 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5979 if (!rc) {
5980 next_handle = le16_to_cpu(resp->next_handle);
5981 if (next_handle != 0) {
5982 if (resp->wol_type ==
5983 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
5984 bp->wol = 1;
5985 bp->wol_filter_id = resp->wol_filter_id;
5986 }
5987 }
5988 }
5989 mutex_unlock(&bp->hwrm_cmd_lock);
5990 return next_handle;
5991}
5992
5993static void bnxt_get_wol_settings(struct bnxt *bp)
5994{
5995 u16 handle = 0;
5996
5997 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
5998 return;
5999
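	/* Walk the firmware's WoL filter list; a returned handle of 0 or
	 * 0xffff marks the end of the list.
	 */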
6000 do {
6001 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
6002 } while (handle && handle != 0xffff);
6003}
6004
939f7f0c
MC
6005static bool bnxt_eee_config_ok(struct bnxt *bp)
6006{
6007 struct ethtool_eee *eee = &bp->eee;
6008 struct bnxt_link_info *link_info = &bp->link_info;
6009
6010 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
6011 return true;
6012
6013 if (eee->eee_enabled) {
6014 u32 advertising =
6015 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
6016
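		/* EEE cannot stay enabled if speed autonegotiation is off. */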
6017 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6018 eee->eee_enabled = 0;
6019 return false;
6020 }
6021 if (eee->advertised & ~advertising) {
6022 eee->advertised = advertising & eee->supported;
6023 return false;
6024 }
6025 }
6026 return true;
6027}
6028
c0c050c5
MC
6029static int bnxt_update_phy_setting(struct bnxt *bp)
6030{
6031 int rc;
6032 bool update_link = false;
6033 bool update_pause = false;
939f7f0c 6034 bool update_eee = false;
c0c050c5
MC
6035 struct bnxt_link_info *link_info = &bp->link_info;
6036
6037 rc = bnxt_update_link(bp, true);
6038 if (rc) {
6039 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
6040 rc);
6041 return rc;
6042 }
33dac24a
MC
6043 if (!BNXT_SINGLE_PF(bp))
6044 return 0;
6045
c0c050c5 6046 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
c9ee9516
MC
6047 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
6048 link_info->req_flow_ctrl)
c0c050c5
MC
6049 update_pause = true;
6050 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
6051 link_info->force_pause_setting != link_info->req_flow_ctrl)
6052 update_pause = true;
c0c050c5
MC
6053 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6054 if (BNXT_AUTO_MODE(link_info->auto_mode))
6055 update_link = true;
6056 if (link_info->req_link_speed != link_info->force_link_speed)
6057 update_link = true;
de73018f
MC
6058 if (link_info->req_duplex != link_info->duplex_setting)
6059 update_link = true;
c0c050c5
MC
6060 } else {
6061 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
6062 update_link = true;
6063 if (link_info->advertising != link_info->auto_link_speeds)
6064 update_link = true;
c0c050c5
MC
6065 }
6066
16d663a6
MC
 6067	/* The last close may have shut down the link, so we need to call
6068 * PHY_CFG to bring it back up.
6069 */
6070 if (!netif_carrier_ok(bp->dev))
6071 update_link = true;
6072
939f7f0c
MC
6073 if (!bnxt_eee_config_ok(bp))
6074 update_eee = true;
6075
c0c050c5 6076 if (update_link)
939f7f0c 6077 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
c0c050c5
MC
6078 else if (update_pause)
6079 rc = bnxt_hwrm_set_pause(bp);
6080 if (rc) {
6081 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
6082 rc);
6083 return rc;
6084 }
6085
6086 return rc;
6087}
6088
11809490
JH
6089/* Common routine to pre-map certain register block to different GRC window.
 6090 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
 6091 * in the PF and 3 windows in the VF can be customized to map in different
6092 * register blocks.
6093 */
6094static void bnxt_preset_reg_win(struct bnxt *bp)
6095{
6096 if (BNXT_PF(bp)) {
6097 /* CAG registers map to GRC window #4 */
6098 writel(BNXT_CAG_REG_BASE,
6099 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
6100 }
6101}
6102
c0c050c5
MC
6103static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6104{
6105 int rc = 0;
6106
11809490 6107 bnxt_preset_reg_win(bp);
c0c050c5
MC
6108 netif_carrier_off(bp->dev);
6109 if (irq_re_init) {
6110 rc = bnxt_setup_int_mode(bp);
6111 if (rc) {
6112 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
6113 rc);
6114 return rc;
6115 }
6116 }
6117 if ((bp->flags & BNXT_FLAG_RFS) &&
6118 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
6119 /* disable RFS if falling back to INTA */
6120 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
6121 bp->flags &= ~BNXT_FLAG_RFS;
6122 }
6123
6124 rc = bnxt_alloc_mem(bp, irq_re_init);
6125 if (rc) {
6126 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
6127 goto open_err_free_mem;
6128 }
6129
6130 if (irq_re_init) {
6131 bnxt_init_napi(bp);
6132 rc = bnxt_request_irq(bp);
6133 if (rc) {
6134 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
6135 goto open_err;
6136 }
6137 }
6138
6139 bnxt_enable_napi(bp);
6140
6141 rc = bnxt_init_nic(bp, irq_re_init);
6142 if (rc) {
6143 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
6144 goto open_err;
6145 }
6146
6147 if (link_re_init) {
6148 rc = bnxt_update_phy_setting(bp);
6149 if (rc)
ba41d46f 6150 netdev_warn(bp->dev, "failed to update phy settings\n");
c0c050c5
MC
6151 }
6152
7cdd5fc3 6153 if (irq_re_init)
ad51b8e9 6154 udp_tunnel_get_rx_info(bp->dev);
c0c050c5 6155
caefe526 6156 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
6157 bnxt_enable_int(bp);
6158 /* Enable TX queues */
6159 bnxt_tx_enable(bp);
6160 mod_timer(&bp->timer, jiffies + bp->current_interval);
10289bec
MC
6161 /* Poll link status and check for SFP+ module status */
6162 bnxt_get_port_module_status(bp);
c0c050c5
MC
6163
6164 return 0;
6165
6166open_err:
6167 bnxt_disable_napi(bp);
6168 bnxt_del_napi(bp);
6169
6170open_err_free_mem:
6171 bnxt_free_skbs(bp);
6172 bnxt_free_irq(bp);
6173 bnxt_free_mem(bp, true);
6174 return rc;
6175}
6176
6177/* rtnl_lock held */
6178int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6179{
6180 int rc = 0;
6181
6182 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
6183 if (rc) {
6184 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
6185 dev_close(bp->dev);
6186 }
6187 return rc;
6188}
6189
f7dc1ea6
MC
 6190/* rtnl_lock held, open the NIC halfway by allocating all resources, but
6191 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
6192 * self tests.
6193 */
6194int bnxt_half_open_nic(struct bnxt *bp)
6195{
6196 int rc = 0;
6197
6198 rc = bnxt_alloc_mem(bp, false);
6199 if (rc) {
6200 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
6201 goto half_open_err;
6202 }
6203 rc = bnxt_init_nic(bp, false);
6204 if (rc) {
6205 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
6206 goto half_open_err;
6207 }
6208 return 0;
6209
6210half_open_err:
6211 bnxt_free_skbs(bp);
6212 bnxt_free_mem(bp, false);
6213 dev_close(bp->dev);
6214 return rc;
6215}
6216
6217/* rtnl_lock held, this call can only be made after a previous successful
6218 * call to bnxt_half_open_nic().
6219 */
6220void bnxt_half_close_nic(struct bnxt *bp)
6221{
6222 bnxt_hwrm_resource_free(bp, false, false);
6223 bnxt_free_skbs(bp);
6224 bnxt_free_mem(bp, false);
6225}
6226
c0c050c5
MC
6227static int bnxt_open(struct net_device *dev)
6228{
6229 struct bnxt *bp = netdev_priv(dev);
c0c050c5 6230
c0c050c5
MC
6231 return __bnxt_open_nic(bp, true, true);
6232}
6233
c0c050c5
MC
6234int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6235{
6236 int rc = 0;
6237
6238#ifdef CONFIG_BNXT_SRIOV
6239 if (bp->sriov_cfg) {
6240 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
6241 !bp->sriov_cfg,
6242 BNXT_SRIOV_CFG_WAIT_TMO);
6243 if (rc)
6244 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
6245 }
6246#endif
 6247	/* Change device state to avoid TX queue wake-ups */
6248 bnxt_tx_disable(bp);
6249
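	/* Clear BNXT_STATE_OPEN, then wait for any bnxt_sp_task() already in
	 * progress to finish; bnxt_sp_task() checks BNXT_STATE_OPEN after
	 * setting BNXT_STATE_IN_SP_TASK, so new work will bail out early.
	 */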
caefe526 6250 clear_bit(BNXT_STATE_OPEN, &bp->state);
4cebdcec
MC
6251 smp_mb__after_atomic();
6252 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
6253 msleep(20);
c0c050c5 6254
9d8bc097 6255	/* Flush rings and disable interrupts */
c0c050c5
MC
6256 bnxt_shutdown_nic(bp, irq_re_init);
6257
6258 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
6259
6260 bnxt_disable_napi(bp);
c0c050c5
MC
6261 del_timer_sync(&bp->timer);
6262 bnxt_free_skbs(bp);
6263
6264 if (irq_re_init) {
6265 bnxt_free_irq(bp);
6266 bnxt_del_napi(bp);
6267 }
6268 bnxt_free_mem(bp, irq_re_init);
6269 return rc;
6270}
6271
6272static int bnxt_close(struct net_device *dev)
6273{
6274 struct bnxt *bp = netdev_priv(dev);
6275
6276 bnxt_close_nic(bp, true, true);
33f7d55f 6277 bnxt_hwrm_shutdown_link(bp);
c0c050c5
MC
6278 return 0;
6279}
6280
6281/* rtnl_lock held */
6282static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6283{
6284 switch (cmd) {
6285 case SIOCGMIIPHY:
6286 /* fallthru */
6287 case SIOCGMIIREG: {
6288 if (!netif_running(dev))
6289 return -EAGAIN;
6290
6291 return 0;
6292 }
6293
6294 case SIOCSMIIREG:
6295 if (!netif_running(dev))
6296 return -EAGAIN;
6297
6298 return 0;
6299
6300 default:
6301 /* do nothing */
6302 break;
6303 }
6304 return -EOPNOTSUPP;
6305}
6306
bc1f4470 6307static void
c0c050c5
MC
6308bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6309{
6310 u32 i;
6311 struct bnxt *bp = netdev_priv(dev);
6312
c0c050c5 6313 if (!bp->bnapi)
bc1f4470 6314 return;
c0c050c5
MC
6315
6316 /* TODO check if we need to synchronize with bnxt_close path */
6317 for (i = 0; i < bp->cp_nr_rings; i++) {
6318 struct bnxt_napi *bnapi = bp->bnapi[i];
6319 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6320 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
6321
6322 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
6323 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
6324 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
6325
6326 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
6327 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
6328 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
6329
6330 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
6331 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
6332 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
6333
6334 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
6335 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
6336 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
6337
6338 stats->rx_missed_errors +=
6339 le64_to_cpu(hw_stats->rx_discard_pkts);
6340
6341 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
6342
c0c050c5
MC
6343 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
6344 }
6345
9947f83f
MC
6346 if (bp->flags & BNXT_FLAG_PORT_STATS) {
6347 struct rx_port_stats *rx = bp->hw_rx_port_stats;
6348 struct tx_port_stats *tx = bp->hw_tx_port_stats;
6349
6350 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
6351 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
6352 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
6353 le64_to_cpu(rx->rx_ovrsz_frames) +
6354 le64_to_cpu(rx->rx_runt_frames);
6355 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
6356 le64_to_cpu(rx->rx_jbr_frames);
6357 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
6358 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
6359 stats->tx_errors = le64_to_cpu(tx->tx_err);
6360 }
c0c050c5
MC
6361}
6362
6363static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
6364{
6365 struct net_device *dev = bp->dev;
6366 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6367 struct netdev_hw_addr *ha;
6368 u8 *haddr;
6369 int mc_count = 0;
6370 bool update = false;
6371 int off = 0;
6372
6373 netdev_for_each_mc_addr(ha, dev) {
6374 if (mc_count >= BNXT_MAX_MC_ADDRS) {
6375 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6376 vnic->mc_list_count = 0;
6377 return false;
6378 }
6379 haddr = ha->addr;
6380 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
6381 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
6382 update = true;
6383 }
6384 off += ETH_ALEN;
6385 mc_count++;
6386 }
6387 if (mc_count)
6388 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
6389
6390 if (mc_count != vnic->mc_list_count) {
6391 vnic->mc_list_count = mc_count;
6392 update = true;
6393 }
6394 return update;
6395}
6396
6397static bool bnxt_uc_list_updated(struct bnxt *bp)
6398{
6399 struct net_device *dev = bp->dev;
6400 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6401 struct netdev_hw_addr *ha;
6402 int off = 0;
6403
6404 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
6405 return true;
6406
6407 netdev_for_each_uc_addr(ha, dev) {
6408 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
6409 return true;
6410
6411 off += ETH_ALEN;
6412 }
6413 return false;
6414}
6415
6416static void bnxt_set_rx_mode(struct net_device *dev)
6417{
6418 struct bnxt *bp = netdev_priv(dev);
6419 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6420 u32 mask = vnic->rx_mask;
6421 bool mc_update = false;
6422 bool uc_update;
6423
6424 if (!netif_running(dev))
6425 return;
6426
6427 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
6428 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
6429 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
6430
17c71ac3 6431 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
c0c050c5
MC
6432 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6433
6434 uc_update = bnxt_uc_list_updated(bp);
6435
6436 if (dev->flags & IFF_ALLMULTI) {
6437 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6438 vnic->mc_list_count = 0;
6439 } else {
6440 mc_update = bnxt_mc_list_updated(bp, &mask);
6441 }
6442
6443 if (mask != vnic->rx_mask || uc_update || mc_update) {
6444 vnic->rx_mask = mask;
6445
6446 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
6447 schedule_work(&bp->sp_task);
6448 }
6449}
6450
b664f008 6451static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
6452{
6453 struct net_device *dev = bp->dev;
6454 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6455 struct netdev_hw_addr *ha;
6456 int i, off = 0, rc;
6457 bool uc_update;
6458
6459 netif_addr_lock_bh(dev);
6460 uc_update = bnxt_uc_list_updated(bp);
6461 netif_addr_unlock_bh(dev);
6462
6463 if (!uc_update)
6464 goto skip_uc;
6465
6466 mutex_lock(&bp->hwrm_cmd_lock);
6467 for (i = 1; i < vnic->uc_filter_count; i++) {
6468 struct hwrm_cfa_l2_filter_free_input req = {0};
6469
6470 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
6471 -1);
6472
6473 req.l2_filter_id = vnic->fw_l2_filter_id[i];
6474
6475 rc = _hwrm_send_message(bp, &req, sizeof(req),
6476 HWRM_CMD_TIMEOUT);
6477 }
6478 mutex_unlock(&bp->hwrm_cmd_lock);
6479
6480 vnic->uc_filter_count = 1;
6481
6482 netif_addr_lock_bh(dev);
6483 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
6484 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6485 } else {
6486 netdev_for_each_uc_addr(ha, dev) {
6487 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
6488 off += ETH_ALEN;
6489 vnic->uc_filter_count++;
6490 }
6491 }
6492 netif_addr_unlock_bh(dev);
6493
6494 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
6495 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
6496 if (rc) {
6497 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
6498 rc);
6499 vnic->uc_filter_count = i;
b664f008 6500 return rc;
c0c050c5
MC
6501 }
6502 }
6503
6504skip_uc:
6505 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
6506 if (rc)
6507 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
6508 rc);
b664f008
MC
6509
6510 return rc;
c0c050c5
MC
6511}
6512
8079e8f1
MC
 6513/* If the chip and firmware support RFS */
6514static bool bnxt_rfs_supported(struct bnxt *bp)
6515{
6516 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
6517 return true;
ae10ae74
MC
6518 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6519 return true;
8079e8f1
MC
6520 return false;
6521}
6522
6523/* If runtime conditions support RFS */
2bcfa6f6
MC
6524static bool bnxt_rfs_capable(struct bnxt *bp)
6525{
6526#ifdef CONFIG_RFS_ACCEL
8079e8f1 6527 int vnics, max_vnics, max_rss_ctxs;
2bcfa6f6 6528
964fd480 6529 if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
2bcfa6f6
MC
6530 return false;
6531
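	/* RFS needs a VNIC for each RX ring plus the default VNIC. */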
6532 vnics = 1 + bp->rx_nr_rings;
8079e8f1
MC
6533 max_vnics = bnxt_get_max_func_vnics(bp);
6534 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
ae10ae74
MC
6535
6536 /* RSS contexts not a limiting factor */
6537 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6538 max_rss_ctxs = max_vnics;
8079e8f1 6539 if (vnics > max_vnics || vnics > max_rss_ctxs) {
a2304909
VV
6540 netdev_warn(bp->dev,
6541 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
8079e8f1 6542 min(max_rss_ctxs - 1, max_vnics - 1));
2bcfa6f6 6543 return false;
a2304909 6544 }
2bcfa6f6
MC
6545
6546 return true;
6547#else
6548 return false;
6549#endif
6550}
6551
c0c050c5
MC
6552static netdev_features_t bnxt_fix_features(struct net_device *dev,
6553 netdev_features_t features)
6554{
2bcfa6f6
MC
6555 struct bnxt *bp = netdev_priv(dev);
6556
a2304909 6557 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
2bcfa6f6 6558 features &= ~NETIF_F_NTUPLE;
5a9f6b23
MC
6559
 6560	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
6561 * turned on or off together.
6562 */
6563 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
6564 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
6565 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
6566 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6567 NETIF_F_HW_VLAN_STAG_RX);
6568 else
6569 features |= NETIF_F_HW_VLAN_CTAG_RX |
6570 NETIF_F_HW_VLAN_STAG_RX;
6571 }
cf6645f8
MC
6572#ifdef CONFIG_BNXT_SRIOV
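	/* RX VLAN acceleration is turned off on a VF that has a PF-assigned
	 * VLAN.
	 */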
6573 if (BNXT_VF(bp)) {
6574 if (bp->vf.vlan) {
6575 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6576 NETIF_F_HW_VLAN_STAG_RX);
6577 }
6578 }
6579#endif
c0c050c5
MC
6580 return features;
6581}
6582
6583static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
6584{
6585 struct bnxt *bp = netdev_priv(dev);
6586 u32 flags = bp->flags;
6587 u32 changes;
6588 int rc = 0;
6589 bool re_init = false;
6590 bool update_tpa = false;
6591
6592 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
3e8060fa 6593 if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
c0c050c5
MC
6594 flags |= BNXT_FLAG_GRO;
6595 if (features & NETIF_F_LRO)
6596 flags |= BNXT_FLAG_LRO;
6597
bdbd1eb5
MC
6598 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
6599 flags &= ~BNXT_FLAG_TPA;
6600
c0c050c5
MC
6601 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6602 flags |= BNXT_FLAG_STRIP_VLAN;
6603
6604 if (features & NETIF_F_NTUPLE)
6605 flags |= BNXT_FLAG_RFS;
6606
6607 changes = flags ^ bp->flags;
6608 if (changes & BNXT_FLAG_TPA) {
6609 update_tpa = true;
6610 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
6611 (flags & BNXT_FLAG_TPA) == 0)
6612 re_init = true;
6613 }
6614
6615 if (changes & ~BNXT_FLAG_TPA)
6616 re_init = true;
6617
6618 if (flags != bp->flags) {
6619 u32 old_flags = bp->flags;
6620
6621 bp->flags = flags;
6622
2bcfa6f6 6623 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
c0c050c5
MC
6624 if (update_tpa)
6625 bnxt_set_ring_params(bp);
6626 return rc;
6627 }
6628
6629 if (re_init) {
6630 bnxt_close_nic(bp, false, false);
6631 if (update_tpa)
6632 bnxt_set_ring_params(bp);
6633
6634 return bnxt_open_nic(bp, false, false);
6635 }
6636 if (update_tpa) {
6637 rc = bnxt_set_tpa(bp,
6638 (flags & BNXT_FLAG_TPA) ?
6639 true : false);
6640 if (rc)
6641 bp->flags = old_flags;
6642 }
6643 }
6644 return rc;
6645}
6646
9f554590
MC
6647static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
6648{
b6ab4b01 6649 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9f554590
MC
6650 int i = bnapi->index;
6651
3b2b7d9d
MC
6652 if (!txr)
6653 return;
6654
9f554590
MC
6655 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
6656 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
6657 txr->tx_cons);
6658}
6659
6660static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
6661{
b6ab4b01 6662 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9f554590
MC
6663 int i = bnapi->index;
6664
3b2b7d9d
MC
6665 if (!rxr)
6666 return;
6667
9f554590
MC
6668 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
6669 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
6670 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
6671 rxr->rx_sw_agg_prod);
6672}
6673
6674static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
6675{
6676 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6677 int i = bnapi->index;
6678
6679 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
6680 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
6681}
6682
c0c050c5
MC
6683static void bnxt_dbg_dump_states(struct bnxt *bp)
6684{
6685 int i;
6686 struct bnxt_napi *bnapi;
c0c050c5
MC
6687
6688 for (i = 0; i < bp->cp_nr_rings; i++) {
6689 bnapi = bp->bnapi[i];
c0c050c5 6690 if (netif_msg_drv(bp)) {
9f554590
MC
6691 bnxt_dump_tx_sw_state(bnapi);
6692 bnxt_dump_rx_sw_state(bnapi);
6693 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
6694 }
6695 }
6696}
6697
6988bd92 6698static void bnxt_reset_task(struct bnxt *bp, bool silent)
c0c050c5 6699{
6988bd92
MC
6700 if (!silent)
6701 bnxt_dbg_dump_states(bp);
028de140 6702 if (netif_running(bp->dev)) {
b386cd36
MC
6703 int rc;
6704
6705 if (!silent)
6706 bnxt_ulp_stop(bp);
028de140 6707 bnxt_close_nic(bp, false, false);
b386cd36
MC
6708 rc = bnxt_open_nic(bp, false, false);
6709 if (!silent && !rc)
6710 bnxt_ulp_start(bp);
028de140 6711 }
c0c050c5
MC
6712}
6713
6714static void bnxt_tx_timeout(struct net_device *dev)
6715{
6716 struct bnxt *bp = netdev_priv(dev);
6717
6718 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6719 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6720 schedule_work(&bp->sp_task);
6721}
6722
6723#ifdef CONFIG_NET_POLL_CONTROLLER
6724static void bnxt_poll_controller(struct net_device *dev)
6725{
6726 struct bnxt *bp = netdev_priv(dev);
6727 int i;
6728
6729 for (i = 0; i < bp->cp_nr_rings; i++) {
6730 struct bnxt_irq *irq = &bp->irq_tbl[i];
6731
6732 disable_irq(irq->vector);
6733 irq->handler(irq->vector, bp->bnapi[i]);
6734 enable_irq(irq->vector);
6735 }
6736}
6737#endif
6738
6739static void bnxt_timer(unsigned long data)
6740{
6741 struct bnxt *bp = (struct bnxt *)data;
6742 struct net_device *dev = bp->dev;
6743
6744 if (!netif_running(dev))
6745 return;
6746
6747 if (atomic_read(&bp->intr_sem) != 0)
6748 goto bnxt_restart_timer;
6749
3bdf56c4
MC
6750 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
6751 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6752 schedule_work(&bp->sp_task);
6753 }
c0c050c5
MC
6754bnxt_restart_timer:
6755 mod_timer(&bp->timer, jiffies + bp->current_interval);
6756}
6757
a551ee94 6758static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6988bd92 6759{
a551ee94
MC
6760 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
6761 * set. If the device is being closed, bnxt_close() may be holding
6988bd92
MC
6762 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
6763 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
6764 */
6765 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6766 rtnl_lock();
a551ee94
MC
6767}
6768
6769static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
6770{
6988bd92
MC
6771 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6772 rtnl_unlock();
6773}
6774
a551ee94
MC
6775/* Only called from bnxt_sp_task() */
6776static void bnxt_reset(struct bnxt *bp, bool silent)
6777{
6778 bnxt_rtnl_lock_sp(bp);
6779 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6780 bnxt_reset_task(bp, silent);
6781 bnxt_rtnl_unlock_sp(bp);
6782}
6783
c0c050c5
MC
6784static void bnxt_cfg_ntp_filters(struct bnxt *);
6785
6786static void bnxt_sp_task(struct work_struct *work)
6787{
6788 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
c0c050c5 6789
4cebdcec
MC
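	/* Mark this task as running before testing BNXT_STATE_OPEN; the
	 * ordering pairs with bnxt_close_nic(), which clears BNXT_STATE_OPEN
	 * and then waits for BNXT_STATE_IN_SP_TASK to clear.
	 */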
6790 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6791 smp_mb__after_atomic();
6792 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6793 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5 6794 return;
4cebdcec 6795 }
c0c050c5
MC
6796
6797 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
6798 bnxt_cfg_rx_mode(bp);
6799
6800 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
6801 bnxt_cfg_ntp_filters(bp);
c0c050c5
MC
6802 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
6803 bnxt_hwrm_exec_fwd_req(bp);
6804 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6805 bnxt_hwrm_tunnel_dst_port_alloc(
6806 bp, bp->vxlan_port,
6807 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6808 }
6809 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6810 bnxt_hwrm_tunnel_dst_port_free(
6811 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6812 }
7cdd5fc3
AD
6813 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6814 bnxt_hwrm_tunnel_dst_port_alloc(
6815 bp, bp->nge_port,
6816 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6817 }
6818 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6819 bnxt_hwrm_tunnel_dst_port_free(
6820 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6821 }
3bdf56c4
MC
6822 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6823 bnxt_hwrm_port_qstats(bp);
6824
a551ee94
MC
 6825	/* The functions below will clear BNXT_STATE_IN_SP_TASK. They
6826 * must be the last functions to be called before exiting.
6827 */
0eaa24b9
MC
6828 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
6829 int rc = 0;
6830
6831 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6832 &bp->sp_event))
6833 bnxt_hwrm_phy_qcaps(bp);
6834
6835 bnxt_rtnl_lock_sp(bp);
6836 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6837 rc = bnxt_update_link(bp, true);
6838 bnxt_rtnl_unlock_sp(bp);
6839 if (rc)
6840 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6841 rc);
6842 }
90c694bb
MC
6843 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
6844 bnxt_rtnl_lock_sp(bp);
6845 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6846 bnxt_get_port_module_status(bp);
6847 bnxt_rtnl_unlock_sp(bp);
6848 }
6988bd92
MC
6849 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
6850 bnxt_reset(bp, false);
4cebdcec 6851
fc0f1929
MC
6852 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
6853 bnxt_reset(bp, true);
6854
4cebdcec
MC
6855 smp_mb__before_atomic();
6856 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5
MC
6857}
6858
d1e7925e 6859/* Under rtnl_lock */
5f449249 6860int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp)
d1e7925e
MC
6861{
6862 int max_rx, max_tx, tx_sets = 1;
6863 int tx_rings_needed;
6864 bool sh = true;
6865 int rc;
6866
6867 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
6868 sh = false;
6869
6870 if (tcs)
6871 tx_sets = tcs;
6872
6873 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
6874 if (rc)
6875 return rc;
6876
6877 if (max_rx < rx)
6878 return -ENOMEM;
6879
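	/* Each TC needs its own set of TX rings, plus extra TX rings for XDP. */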
5f449249 6880 tx_rings_needed = tx * tx_sets + tx_xdp;
d1e7925e
MC
6881 if (max_tx < tx_rings_needed)
6882 return -ENOMEM;
6883
6884 if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
5f449249 6885 tx_rings_needed < (tx * tx_sets + tx_xdp))
d1e7925e
MC
6886 return -ENOMEM;
6887 return 0;
6888}
6889
17086399
SP
6890static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
6891{
6892 if (bp->bar2) {
6893 pci_iounmap(pdev, bp->bar2);
6894 bp->bar2 = NULL;
6895 }
6896
6897 if (bp->bar1) {
6898 pci_iounmap(pdev, bp->bar1);
6899 bp->bar1 = NULL;
6900 }
6901
6902 if (bp->bar0) {
6903 pci_iounmap(pdev, bp->bar0);
6904 bp->bar0 = NULL;
6905 }
6906}
6907
6908static void bnxt_cleanup_pci(struct bnxt *bp)
6909{
6910 bnxt_unmap_bars(bp, bp->pdev);
6911 pci_release_regions(bp->pdev);
6912 pci_disable_device(bp->pdev);
6913}
6914
c0c050c5
MC
6915static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
6916{
6917 int rc;
6918 struct bnxt *bp = netdev_priv(dev);
6919
6920 SET_NETDEV_DEV(dev, &pdev->dev);
6921
6922 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6923 rc = pci_enable_device(pdev);
6924 if (rc) {
6925 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
6926 goto init_err;
6927 }
6928
6929 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6930 dev_err(&pdev->dev,
6931 "Cannot find PCI device base address, aborting\n");
6932 rc = -ENODEV;
6933 goto init_err_disable;
6934 }
6935
6936 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6937 if (rc) {
6938 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
6939 goto init_err_disable;
6940 }
6941
6942 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
6943 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
6944 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
6945 goto init_err_disable;
6946 }
6947
6948 pci_set_master(pdev);
6949
6950 bp->dev = dev;
6951 bp->pdev = pdev;
6952
6953 bp->bar0 = pci_ioremap_bar(pdev, 0);
6954 if (!bp->bar0) {
6955 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
6956 rc = -ENOMEM;
6957 goto init_err_release;
6958 }
6959
6960 bp->bar1 = pci_ioremap_bar(pdev, 2);
6961 if (!bp->bar1) {
6962 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
6963 rc = -ENOMEM;
6964 goto init_err_release;
6965 }
6966
6967 bp->bar2 = pci_ioremap_bar(pdev, 4);
6968 if (!bp->bar2) {
6969 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
6970 rc = -ENOMEM;
6971 goto init_err_release;
6972 }
6973
6316ea6d
SB
6974 pci_enable_pcie_error_reporting(pdev);
6975
c0c050c5
MC
6976 INIT_WORK(&bp->sp_task, bnxt_sp_task);
6977
6978 spin_lock_init(&bp->ntp_fltr_lock);
6979
6980 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
6981 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
6982
dfb5b894 6983	/* tick values in microseconds */
dfc9c94a
MC
6984 bp->rx_coal_ticks = 12;
6985 bp->rx_coal_bufs = 30;
dfb5b894
MC
6986 bp->rx_coal_ticks_irq = 1;
6987 bp->rx_coal_bufs_irq = 2;
c0c050c5 6988
dfc9c94a
MC
6989 bp->tx_coal_ticks = 25;
6990 bp->tx_coal_bufs = 30;
6991 bp->tx_coal_ticks_irq = 2;
6992 bp->tx_coal_bufs_irq = 2;
6993
51f30785
MC
6994 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
6995
c0c050c5
MC
6996 init_timer(&bp->timer);
6997 bp->timer.data = (unsigned long)bp;
6998 bp->timer.function = bnxt_timer;
6999 bp->current_interval = BNXT_TIMER_INTERVAL;
7000
caefe526 7001 clear_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
7002 return 0;
7003
7004init_err_release:
17086399 7005 bnxt_unmap_bars(bp, pdev);
c0c050c5
MC
7006 pci_release_regions(pdev);
7007
7008init_err_disable:
7009 pci_disable_device(pdev);
7010
7011init_err:
7012 return rc;
7013}
7014
7015/* rtnl_lock held */
7016static int bnxt_change_mac_addr(struct net_device *dev, void *p)
7017{
7018 struct sockaddr *addr = p;
1fc2cfd0
JH
7019 struct bnxt *bp = netdev_priv(dev);
7020 int rc = 0;
c0c050c5
MC
7021
7022 if (!is_valid_ether_addr(addr->sa_data))
7023 return -EADDRNOTAVAIL;
7024
84c33dd3
MC
7025 rc = bnxt_approve_mac(bp, addr->sa_data);
7026 if (rc)
7027 return rc;
bdd4347b 7028
1fc2cfd0
JH
7029 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
7030 return 0;
7031
c0c050c5 7032 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1fc2cfd0
JH
7033 if (netif_running(dev)) {
7034 bnxt_close_nic(bp, false, false);
7035 rc = bnxt_open_nic(bp, false, false);
7036 }
c0c050c5 7037
1fc2cfd0 7038 return rc;
c0c050c5
MC
7039}
7040
7041/* rtnl_lock held */
7042static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
7043{
7044 struct bnxt *bp = netdev_priv(dev);
7045
c0c050c5
MC
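	/* The ring parameters depend on the MTU, so close the NIC if it is
	 * running, update them, and reopen it.
	 */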
7046 if (netif_running(dev))
7047 bnxt_close_nic(bp, false, false);
7048
7049 dev->mtu = new_mtu;
7050 bnxt_set_ring_params(bp);
7051
7052 if (netif_running(dev))
7053 return bnxt_open_nic(bp, false, false);
7054
7055 return 0;
7056}
7057
c5e3deb8 7058int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
c0c050c5
MC
7059{
7060 struct bnxt *bp = netdev_priv(dev);
3ffb6a39 7061 bool sh = false;
d1e7925e 7062 int rc;
16e5cc64 7063
c0c050c5 7064 if (tc > bp->max_tc) {
b451c8b6 7065 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
c0c050c5
MC
7066 tc, bp->max_tc);
7067 return -EINVAL;
7068 }
7069
7070 if (netdev_get_num_tc(dev) == tc)
7071 return 0;
7072
3ffb6a39
MC
7073 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7074 sh = true;
7075
5f449249
MC
7076 rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
7077 tc, bp->tx_nr_rings_xdp);
d1e7925e
MC
7078 if (rc)
7079 return rc;
c0c050c5
MC
7080
7081 /* Needs to close the device and do hw resource re-allocations */
7082 if (netif_running(bp->dev))
7083 bnxt_close_nic(bp, true, false);
7084
7085 if (tc) {
7086 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
7087 netdev_set_num_tc(dev, tc);
7088 } else {
7089 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
7090 netdev_reset_tc(dev);
7091 }
3ffb6a39
MC
7092 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7093 bp->tx_nr_rings + bp->rx_nr_rings;
c0c050c5
MC
7094 bp->num_stat_ctxs = bp->cp_nr_rings;
7095
7096 if (netif_running(bp->dev))
7097 return bnxt_open_nic(bp, true, false);
7098
7099 return 0;
7100}
7101
c5e3deb8
MC
7102static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
7103 struct tc_to_netdev *ntc)
7104{
7105 if (ntc->type != TC_SETUP_MQPRIO)
7106 return -EINVAL;
7107
56f36acd
AN
7108 ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
7109
7110 return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc);
c5e3deb8
MC
7111}
7112
c0c050c5
MC
7113#ifdef CONFIG_RFS_ACCEL
7114static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
7115 struct bnxt_ntuple_filter *f2)
7116{
7117 struct flow_keys *keys1 = &f1->fkeys;
7118 struct flow_keys *keys2 = &f2->fkeys;
7119
7120 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
7121 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
7122 keys1->ports.ports == keys2->ports.ports &&
7123 keys1->basic.ip_proto == keys2->basic.ip_proto &&
7124 keys1->basic.n_proto == keys2->basic.n_proto &&
61aad724 7125 keys1->control.flags == keys2->control.flags &&
a54c4d74
MC
7126 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
7127 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
c0c050c5
MC
7128 return true;
7129
7130 return false;
7131}
7132
7133static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
7134 u16 rxq_index, u32 flow_id)
7135{
7136 struct bnxt *bp = netdev_priv(dev);
7137 struct bnxt_ntuple_filter *fltr, *new_fltr;
7138 struct flow_keys *fkeys;
7139 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
a54c4d74 7140 int rc = 0, idx, bit_id, l2_idx = 0;
c0c050c5
MC
7141 struct hlist_head *head;
7142
a54c4d74
MC
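	/* If the destination MAC is not the NIC's own address, find the
	 * matching unicast filter so the new ntuple filter can reference it.
	 */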
7143 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
7144 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7145 int off = 0, j;
7146
7147 netif_addr_lock_bh(dev);
7148 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
7149 if (ether_addr_equal(eth->h_dest,
7150 vnic->uc_list + off)) {
7151 l2_idx = j + 1;
7152 break;
7153 }
7154 }
7155 netif_addr_unlock_bh(dev);
7156 if (!l2_idx)
7157 return -EINVAL;
7158 }
c0c050c5
MC
7159 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
7160 if (!new_fltr)
7161 return -ENOMEM;
7162
7163 fkeys = &new_fltr->fkeys;
7164 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
7165 rc = -EPROTONOSUPPORT;
7166 goto err_free;
7167 }
7168
dda0e746
MC
7169 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
7170 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
c0c050c5
MC
7171 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
7172 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
7173 rc = -EPROTONOSUPPORT;
7174 goto err_free;
7175 }
dda0e746
MC
7176 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
7177 bp->hwrm_spec_code < 0x10601) {
7178 rc = -EPROTONOSUPPORT;
7179 goto err_free;
7180 }
61aad724
MC
7181 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
7182 bp->hwrm_spec_code < 0x10601) {
7183 rc = -EPROTONOSUPPORT;
7184 goto err_free;
7185 }
c0c050c5 7186
a54c4d74 7187 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
c0c050c5
MC
7188 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
7189
7190 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
7191 head = &bp->ntp_fltr_hash_tbl[idx];
7192 rcu_read_lock();
7193 hlist_for_each_entry_rcu(fltr, head, hash) {
7194 if (bnxt_fltr_match(fltr, new_fltr)) {
7195 rcu_read_unlock();
7196 rc = 0;
7197 goto err_free;
7198 }
7199 }
7200 rcu_read_unlock();
7201
7202 spin_lock_bh(&bp->ntp_fltr_lock);
84e86b98
MC
7203 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
7204 BNXT_NTP_FLTR_MAX_FLTR, 0);
7205 if (bit_id < 0) {
c0c050c5
MC
7206 spin_unlock_bh(&bp->ntp_fltr_lock);
7207 rc = -ENOMEM;
7208 goto err_free;
7209 }
7210
84e86b98 7211 new_fltr->sw_id = (u16)bit_id;
c0c050c5 7212 new_fltr->flow_id = flow_id;
a54c4d74 7213 new_fltr->l2_fltr_idx = l2_idx;
c0c050c5
MC
7214 new_fltr->rxq = rxq_index;
7215 hlist_add_head_rcu(&new_fltr->hash, head);
7216 bp->ntp_fltr_count++;
7217 spin_unlock_bh(&bp->ntp_fltr_lock);
7218
7219 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
7220 schedule_work(&bp->sp_task);
7221
7222 return new_fltr->sw_id;
7223
7224err_free:
7225 kfree(new_fltr);
7226 return rc;
7227}
7228
7229static void bnxt_cfg_ntp_filters(struct bnxt *bp)
7230{
7231 int i;
7232
7233 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
7234 struct hlist_head *head;
7235 struct hlist_node *tmp;
7236 struct bnxt_ntuple_filter *fltr;
7237 int rc;
7238
7239 head = &bp->ntp_fltr_hash_tbl[i];
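		/* Free filters whose flows the RPS core says have expired;
		 * program into hardware any filters not yet marked valid.
		 */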
7240 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
7241 bool del = false;
7242
7243 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
7244 if (rps_may_expire_flow(bp->dev, fltr->rxq,
7245 fltr->flow_id,
7246 fltr->sw_id)) {
7247 bnxt_hwrm_cfa_ntuple_filter_free(bp,
7248 fltr);
7249 del = true;
7250 }
7251 } else {
7252 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
7253 fltr);
7254 if (rc)
7255 del = true;
7256 else
7257 set_bit(BNXT_FLTR_VALID, &fltr->state);
7258 }
7259
7260 if (del) {
7261 spin_lock_bh(&bp->ntp_fltr_lock);
7262 hlist_del_rcu(&fltr->hash);
7263 bp->ntp_fltr_count--;
7264 spin_unlock_bh(&bp->ntp_fltr_lock);
7265 synchronize_rcu();
7266 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
7267 kfree(fltr);
7268 }
7269 }
7270 }
19241368
JH
7271 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
7272 netdev_info(bp->dev, "Receive PF driver unload event!");
c0c050c5
MC
7273}
7274
7275#else
7276
7277static void bnxt_cfg_ntp_filters(struct bnxt *bp)
7278{
7279}
7280
7281#endif /* CONFIG_RFS_ACCEL */
7282
ad51b8e9
AD
7283static void bnxt_udp_tunnel_add(struct net_device *dev,
7284 struct udp_tunnel_info *ti)
c0c050c5
MC
7285{
7286 struct bnxt *bp = netdev_priv(dev);
7287
ad51b8e9 7288 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
c0c050c5
MC
7289 return;
7290
ad51b8e9 7291 if (!netif_running(dev))
c0c050c5
MC
7292 return;
7293
ad51b8e9
AD
7294 switch (ti->type) {
7295 case UDP_TUNNEL_TYPE_VXLAN:
7296 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
7297 return;
c0c050c5 7298
ad51b8e9
AD
7299 bp->vxlan_port_cnt++;
7300 if (bp->vxlan_port_cnt == 1) {
7301 bp->vxlan_port = ti->port;
7302 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
7303 schedule_work(&bp->sp_task);
7304 }
7305 break;
7cdd5fc3
AD
7306 case UDP_TUNNEL_TYPE_GENEVE:
7307 if (bp->nge_port_cnt && bp->nge_port != ti->port)
7308 return;
7309
7310 bp->nge_port_cnt++;
7311 if (bp->nge_port_cnt == 1) {
7312 bp->nge_port = ti->port;
7313 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
7314 }
7315 break;
ad51b8e9
AD
7316 default:
7317 return;
c0c050c5 7318 }
ad51b8e9
AD
7319
7320 schedule_work(&bp->sp_task);
c0c050c5
MC
7321}
7322
ad51b8e9
AD
7323static void bnxt_udp_tunnel_del(struct net_device *dev,
7324 struct udp_tunnel_info *ti)
c0c050c5
MC
7325{
7326 struct bnxt *bp = netdev_priv(dev);
7327
ad51b8e9 7328 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
c0c050c5
MC
7329 return;
7330
ad51b8e9 7331 if (!netif_running(dev))
c0c050c5
MC
7332 return;
7333
ad51b8e9
AD
7334 switch (ti->type) {
7335 case UDP_TUNNEL_TYPE_VXLAN:
7336 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
7337 return;
c0c050c5
MC
7338 bp->vxlan_port_cnt--;
7339
ad51b8e9
AD
7340 if (bp->vxlan_port_cnt != 0)
7341 return;
7342
7343 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
7344 break;
7cdd5fc3
AD
7345 case UDP_TUNNEL_TYPE_GENEVE:
7346 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
7347 return;
7348 bp->nge_port_cnt--;
7349
7350 if (bp->nge_port_cnt != 0)
7351 return;
7352
7353 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
7354 break;
ad51b8e9
AD
7355 default:
7356 return;
c0c050c5 7357 }
ad51b8e9
AD
7358
7359 schedule_work(&bp->sp_task);
c0c050c5
MC
7360}
7361
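/* Driver entry points registered with the networking stack. */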
7362static const struct net_device_ops bnxt_netdev_ops = {
7363 .ndo_open = bnxt_open,
7364 .ndo_start_xmit = bnxt_start_xmit,
7365 .ndo_stop = bnxt_close,
7366 .ndo_get_stats64 = bnxt_get_stats64,
7367 .ndo_set_rx_mode = bnxt_set_rx_mode,
7368 .ndo_do_ioctl = bnxt_ioctl,
7369 .ndo_validate_addr = eth_validate_addr,
7370 .ndo_set_mac_address = bnxt_change_mac_addr,
7371 .ndo_change_mtu = bnxt_change_mtu,
7372 .ndo_fix_features = bnxt_fix_features,
7373 .ndo_set_features = bnxt_set_features,
7374 .ndo_tx_timeout = bnxt_tx_timeout,
7375#ifdef CONFIG_BNXT_SRIOV
7376 .ndo_get_vf_config = bnxt_get_vf_config,
7377 .ndo_set_vf_mac = bnxt_set_vf_mac,
7378 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
7379 .ndo_set_vf_rate = bnxt_set_vf_bw,
7380 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
7381 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
7382#endif
7383#ifdef CONFIG_NET_POLL_CONTROLLER
7384 .ndo_poll_controller = bnxt_poll_controller,
7385#endif
7386 .ndo_setup_tc = bnxt_setup_tc,
7387#ifdef CONFIG_RFS_ACCEL
7388 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
7389#endif
ad51b8e9
AD
7390 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
7391 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
c6d30e83 7392 .ndo_xdp = bnxt_xdp,
c0c050c5
MC
7393};
7394
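/* PCI .remove handler: disable SR-IOV, unregister the netdev, cancel
 * deferred work and release HWRM, DCB, XDP and PCI resources.
 */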
7395static void bnxt_remove_one(struct pci_dev *pdev)
7396{
7397 struct net_device *dev = pci_get_drvdata(pdev);
7398 struct bnxt *bp = netdev_priv(dev);
7399
7400 if (BNXT_PF(bp))
7401 bnxt_sriov_disable(bp);
7402
6316ea6d 7403 pci_disable_pcie_error_reporting(pdev);
c0c050c5
MC
7404 unregister_netdev(dev);
7405 cancel_work_sync(&bp->sp_task);
7406 bp->sp_event = 0;
7407
7809592d 7408 bnxt_clear_int_mode(bp);
be58a0da 7409 bnxt_hwrm_func_drv_unrgtr(bp);
c0c050c5 7410 bnxt_free_hwrm_resources(bp);
e605db80 7411 bnxt_free_hwrm_short_cmd_req(bp);
eb513658 7412 bnxt_ethtool_free(bp);
7df4ae9f 7413 bnxt_dcb_free(bp);
a588e458
MC
7414 kfree(bp->edev);
7415 bp->edev = NULL;
c6d30e83
MC
7416 if (bp->xdp_prog)
7417 bpf_prog_put(bp->xdp_prog);
17086399 7418 bnxt_cleanup_pci(bp);
c0c050c5 7419 free_netdev(dev);
c0c050c5
MC
7420}
7421
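/* Query PHY capabilities and the current link state from firmware,
 * then seed the driver's link_info (autoneg, advertised speeds, flow
 * control) from the NVM-derived settings.
 */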
7422static int bnxt_probe_phy(struct bnxt *bp)
7423{
7424 int rc = 0;
7425 struct bnxt_link_info *link_info = &bp->link_info;
c0c050c5 7426
170ce013
MC
7427 rc = bnxt_hwrm_phy_qcaps(bp);
7428 if (rc) {
7429 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
7430 rc);
7431 return rc;
7432 }
7433
c0c050c5
MC
7434 rc = bnxt_update_link(bp, false);
7435 if (rc) {
7436 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
7437 rc);
7438 return rc;
7439 }
7440
93ed8117
MC
7441 /* Older firmware does not have supported_auto_speeds, so assume
7442 * that all supported speeds can be autonegotiated.
7443 */
7444 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
7445 link_info->support_auto_speeds = link_info->support_speeds;
7446
c0c050c5 7447 /* Initialize the ethtool setting copy with NVM settings */
0d8abf02 7448 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
c9ee9516
MC
7449 link_info->autoneg = BNXT_AUTONEG_SPEED;
7450 if (bp->hwrm_spec_code >= 0x10201) {
7451 if (link_info->auto_pause_setting &
7452 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
7453 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
7454 } else {
7455 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
7456 }
0d8abf02 7457 link_info->advertising = link_info->auto_link_speeds;
0d8abf02
MC
7458 } else {
7459 link_info->req_link_speed = link_info->force_link_speed;
7460 link_info->req_duplex = link_info->duplex_setting;
c0c050c5 7461 }
c9ee9516
MC
7462 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
7463 link_info->req_flow_ctrl =
7464 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
7465 else
7466 link_info->req_flow_ctrl = link_info->force_pause_setting;
c0c050c5
MC
7467 return rc;
7468}
7469
7470static int bnxt_get_max_irq(struct pci_dev *pdev)
7471{
7472 u16 ctrl;
7473
7474 if (!pdev->msix_cap)
7475 return 1;
7476
7477 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
7478 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
7479}
7480
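/* Report the maximum RX/TX/completion rings available to this
 * function, honoring PF or VF resource limits, ring group counts,
 * aggregation rings and the Nitro A0 restrictions.
 */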
6e6c5a57
MC
7481static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
7482 int *max_cp)
c0c050c5 7483{
6e6c5a57 7484 int max_ring_grps = 0;
c0c050c5 7485
379a80a1 7486#ifdef CONFIG_BNXT_SRIOV
415b6f19 7487 if (!BNXT_PF(bp)) {
c0c050c5
MC
7488 *max_tx = bp->vf.max_tx_rings;
7489 *max_rx = bp->vf.max_rx_rings;
6e6c5a57
MC
7490 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
7491 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
b72d4a68 7492 max_ring_grps = bp->vf.max_hw_ring_grps;
415b6f19 7493 } else
379a80a1 7494#endif
415b6f19
AB
7495 {
7496 *max_tx = bp->pf.max_tx_rings;
7497 *max_rx = bp->pf.max_rx_rings;
7498 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
7499 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
7500 max_ring_grps = bp->pf.max_hw_ring_grps;
c0c050c5 7501 }
76595193
PS
7502 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
7503 *max_cp -= 1;
7504 *max_rx -= 2;
7505 }
c0c050c5
MC
7506 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7507 *max_rx >>= 1;
b72d4a68 7508 *max_rx = min_t(int, *max_rx, max_ring_grps);
6e6c5a57
MC
7509}
7510
7511int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
7512{
7513 int rx, tx, cp;
7514
7515 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
7516 if (!rx || !tx || !cp)
7517 return -ENOMEM;
7518
7519 *max_rx = rx;
7520 *max_tx = tx;
7521 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
7522}
7523
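/* Like bnxt_get_max_rings(), but falls back to disabling aggregation
 * rings (and LRO) when rings are scarce, and sets aside a minimum
 * number of completion rings, stat contexts and IRQs for RoCE when
 * the device supports it.
 */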
e4060d30
MC
7524static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
7525 bool shared)
7526{
7527 int rc;
7528
7529 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
bdbd1eb5
MC
7530 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
7531 /* Not enough rings, try disabling agg rings. */
7532 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7533 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
7534 if (rc)
7535 return rc;
7536 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7537 bp->dev->hw_features &= ~NETIF_F_LRO;
7538 bp->dev->features &= ~NETIF_F_LRO;
7539 bnxt_set_ring_params(bp);
7540 }
e4060d30
MC
7541
7542 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
7543 int max_cp, max_stat, max_irq;
7544
7545 /* Reserve minimum resources for RoCE */
7546 max_cp = bnxt_get_max_func_cp_rings(bp);
7547 max_stat = bnxt_get_max_func_stat_ctxs(bp);
7548 max_irq = bnxt_get_max_func_irqs(bp);
7549 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
7550 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
7551 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
7552 return 0;
7553
7554 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
7555 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
7556 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
7557 max_cp = min_t(int, max_cp, max_irq);
7558 max_cp = min_t(int, max_cp, max_stat);
7559 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
7560 if (rc)
7561 rc = 0;
7562 }
7563 return rc;
7564}
7565
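/* Choose the default ring counts: the smaller of the default RSS
 * queue count and what the hardware allows, reserve the TX rings in
 * firmware, and size the completion rings for shared mode.
 */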
6e6c5a57
MC
7566static int bnxt_set_dflt_rings(struct bnxt *bp)
7567{
7568 int dflt_rings, max_rx_rings, max_tx_rings, rc;
7569 bool sh = true;
7570
7571 if (sh)
7572 bp->flags |= BNXT_FLAG_SHARED_RINGS;
7573 dflt_rings = netif_get_num_default_rss_queues();
e4060d30 7574 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
6e6c5a57
MC
7575 if (rc)
7576 return rc;
7577 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
7578 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
391be5c2
MC
7579
7580 rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
7581 if (rc)
7582 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
7583
6e6c5a57
MC
7584 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
7585 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7586 bp->tx_nr_rings + bp->rx_nr_rings;
7587 bp->num_stat_ctxs = bp->cp_nr_rings;
76595193
PS
7588 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7589 bp->rx_nr_rings++;
7590 bp->cp_nr_rings++;
7591 }
6e6c5a57 7592 return rc;
c0c050c5
MC
7593}
7594
7b08f661
MC
7595void bnxt_restore_pf_fw_resources(struct bnxt *bp)
7596{
7597 ASSERT_RTNL();
7598 bnxt_hwrm_func_qcaps(bp);
a588e458 7599 bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
7b08f661
MC
7600}
7601
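/* Query and log the PCIe link speed and width seen by the device. */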
90c4f788
AK
7602static void bnxt_parse_log_pcie_link(struct bnxt *bp)
7603{
7604 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
7605 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
7606
7607 if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
7608 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
7609 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
7610 else
7611 netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
7612 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
7613 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
7614 speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
7615 "Unknown", width);
7616}
7617
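/* PCI .probe handler: allocate the netdev, bring up the HWRM command
 * channel, query device capabilities, set up default features, rings
 * and interrupt mode, then register the network device.
 */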
c0c050c5
MC
7618static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7619{
7620 static int version_printed;
7621 struct net_device *dev;
7622 struct bnxt *bp;
6e6c5a57 7623 int rc, max_irqs;
c0c050c5 7624
4e00338a 7625 if (pci_is_bridge(pdev))
fa853dda
PS
7626 return -ENODEV;
7627
c0c050c5
MC
7628 if (version_printed++ == 0)
7629 pr_info("%s", version);
7630
7631 max_irqs = bnxt_get_max_irq(pdev);
7632 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
7633 if (!dev)
7634 return -ENOMEM;
7635
7636 bp = netdev_priv(dev);
7637
7638 if (bnxt_vf_pciid(ent->driver_data))
7639 bp->flags |= BNXT_FLAG_VF;
7640
2bcfa6f6 7641 if (pdev->msix_cap)
c0c050c5 7642 bp->flags |= BNXT_FLAG_MSIX_CAP;
c0c050c5
MC
7643
7644 rc = bnxt_init_board(pdev, dev);
7645 if (rc < 0)
7646 goto init_err_free;
7647
7648 dev->netdev_ops = &bnxt_netdev_ops;
7649 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
7650 dev->ethtool_ops = &bnxt_ethtool_ops;
c0c050c5
MC
7651 pci_set_drvdata(pdev, dev);
7652
3e8060fa
PS
7653 rc = bnxt_alloc_hwrm_resources(bp);
7654 if (rc)
17086399 7655 goto init_err_pci_clean;
3e8060fa
PS
7656
7657 mutex_init(&bp->hwrm_cmd_lock);
7658 rc = bnxt_hwrm_ver_get(bp);
7659 if (rc)
17086399 7660 goto init_err_pci_clean;
3e8060fa 7661
e605db80
DK
7662 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
7663 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
7664 if (rc)
7665 goto init_err_pci_clean;
7666 }
7667
3c2217a6
MC
7668 rc = bnxt_hwrm_func_reset(bp);
7669 if (rc)
7670 goto init_err_pci_clean;
7671
5ac67d8b
RS
7672 bnxt_hwrm_fw_set_time(bp);
7673
c0c050c5
MC
7674 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
7675 NETIF_F_TSO | NETIF_F_TSO6 |
7676 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7e13318d 7677 NETIF_F_GSO_IPXIP4 |
152971ee
AD
7678 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7679 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
3e8060fa
PS
7680 NETIF_F_RXCSUM | NETIF_F_GRO;
7681
7682 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
7683 dev->hw_features |= NETIF_F_LRO;
c0c050c5 7684
c0c050c5
MC
7685 dev->hw_enc_features =
7686 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
7687 NETIF_F_TSO | NETIF_F_TSO6 |
7688 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
152971ee 7689 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7e13318d 7690 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
152971ee
AD
7691 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
7692 NETIF_F_GSO_GRE_CSUM;
c0c050c5
MC
7693 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
7694 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
7695 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
7696 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
7697 dev->priv_flags |= IFF_UNICAST_FLT;
7698
e1c6dcca
JW
7699 /* MTU range: 60 - 9500 */
7700 dev->min_mtu = ETH_ZLEN;
c61fb99c 7701 dev->max_mtu = BNXT_MAX_MTU;
e1c6dcca 7702
c0c050c5
MC
7703#ifdef CONFIG_BNXT_SRIOV
7704 init_waitqueue_head(&bp->sriov_cfg_wait);
7705#endif
309369c9 7706 bp->gro_func = bnxt_gro_func_5730x;
3284f9e1 7707 if (BNXT_CHIP_P4_PLUS(bp))
94758f8d 7708 bp->gro_func = bnxt_gro_func_5731x;
434c975a
MC
7709 else
7710 bp->flags |= BNXT_FLAG_DOUBLE_DB;
309369c9 7711
c0c050c5
MC
7712 rc = bnxt_hwrm_func_drv_rgtr(bp);
7713 if (rc)
17086399 7714 goto init_err_pci_clean;
c0c050c5 7715
a1653b13
MC
7716 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
7717 if (rc)
17086399 7718 goto init_err_pci_clean;
a1653b13 7719
a588e458
MC
7720 bp->ulp_probe = bnxt_ulp_probe;
7721
c0c050c5
MC
7722 /* Get the MAX capabilities for this function */
7723 rc = bnxt_hwrm_func_qcaps(bp);
7724 if (rc) {
7725 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
7726 rc);
7727 rc = -1;
17086399 7728 goto init_err_pci_clean;
c0c050c5
MC
7729 }
7730
7731 rc = bnxt_hwrm_queue_qportcfg(bp);
7732 if (rc) {
7733 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
7734 rc);
7735 rc = -1;
17086399 7736 goto init_err_pci_clean;
c0c050c5
MC
7737 }
7738
567b2abe 7739 bnxt_hwrm_func_qcfg(bp);
5ad2cbee 7740 bnxt_hwrm_port_led_qcaps(bp);
eb513658 7741 bnxt_ethtool_init(bp);
87fe6032 7742 bnxt_dcb_init(bp);
567b2abe 7743
c61fb99c 7744 bnxt_set_rx_skb_mode(bp, false);
c0c050c5
MC
7745 bnxt_set_tpa_flags(bp);
7746 bnxt_set_ring_params(bp);
33c2657e 7747 bnxt_set_max_func_irqs(bp, max_irqs);
bdbd1eb5
MC
7748 rc = bnxt_set_dflt_rings(bp);
7749 if (rc) {
7750 netdev_err(bp->dev, "Not enough rings available.\n");
7751 rc = -ENOMEM;
17086399 7752 goto init_err_pci_clean;
bdbd1eb5 7753 }
c0c050c5 7754
87da7f79
MC
7755 /* Default RSS hash cfg. */
7756 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
7757 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
7758 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
7759 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
3284f9e1 7760 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
87da7f79
MC
7761 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
7762 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
7763 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
7764 }
7765
8fdefd63 7766 bnxt_hwrm_vnic_qcaps(bp);
8079e8f1 7767 if (bnxt_rfs_supported(bp)) {
2bcfa6f6
MC
7768 dev->hw_features |= NETIF_F_NTUPLE;
7769 if (bnxt_rfs_capable(bp)) {
7770 bp->flags |= BNXT_FLAG_RFS;
7771 dev->features |= NETIF_F_NTUPLE;
7772 }
7773 }
7774
c0c050c5
MC
7775 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
7776 bp->flags |= BNXT_FLAG_STRIP_VLAN;
7777
7778 rc = bnxt_probe_phy(bp);
7779 if (rc)
17086399 7780 goto init_err_pci_clean;
c0c050c5 7781
7809592d 7782 rc = bnxt_init_int_mode(bp);
c0c050c5 7783 if (rc)
17086399 7784 goto init_err_pci_clean;
c0c050c5 7785
c1ef146a 7786 bnxt_get_wol_settings(bp);
d196ece7
MC
7787 if (bp->flags & BNXT_FLAG_WOL_CAP)
7788 device_set_wakeup_enable(&pdev->dev, bp->wol);
7789 else
7790 device_set_wakeup_capable(&pdev->dev, false);
c1ef146a 7791
7809592d
MC
7792 rc = register_netdev(dev);
7793 if (rc)
7794 goto init_err_clr_int;
7795
c0c050c5
MC
7796 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
7797 board_info[ent->driver_data].name,
7798 (long)pci_resource_start(pdev, 0), dev->dev_addr);
7799
90c4f788
AK
7800 bnxt_parse_log_pcie_link(bp);
7801
c0c050c5
MC
7802 return 0;
7803
7809592d
MC
7804init_err_clr_int:
7805 bnxt_clear_int_mode(bp);
7806
17086399
SP
7807init_err_pci_clean:
7808 bnxt_cleanup_pci(bp);
c0c050c5
MC
7809
7810init_err_free:
7811 free_netdev(dev);
7812 return rc;
7813}
7814
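/* PCI .shutdown handler: close the device and, on power-off, quiesce
 * the ULP, clear the interrupt mode and arm wake-on-LAN before
 * entering D3hot.
 */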
d196ece7
MC
7815static void bnxt_shutdown(struct pci_dev *pdev)
7816{
7817 struct net_device *dev = pci_get_drvdata(pdev);
7818 struct bnxt *bp;
7819
7820 if (!dev)
7821 return;
7822
7823 rtnl_lock();
7824 bp = netdev_priv(dev);
7825 if (!bp)
7826 goto shutdown_exit;
7827
7828 if (netif_running(dev))
7829 dev_close(dev);
7830
7831 if (system_state == SYSTEM_POWER_OFF) {
0efd2fc6 7832 bnxt_ulp_shutdown(bp);
d196ece7
MC
7833 bnxt_clear_int_mode(bp);
7834 pci_wake_from_d3(pdev, bp->wol);
7835 pci_set_power_state(pdev, PCI_D3hot);
7836 }
7837
7838shutdown_exit:
7839 rtnl_unlock();
7840}
7841
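/* System sleep hooks: suspend closes the device and unregisters the
 * driver from firmware; resume re-establishes the HWRM channel,
 * resets the function and reopens the device.
 */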
f65a2044
MC
7842#ifdef CONFIG_PM_SLEEP
7843static int bnxt_suspend(struct device *device)
7844{
7845 struct pci_dev *pdev = to_pci_dev(device);
7846 struct net_device *dev = pci_get_drvdata(pdev);
7847 struct bnxt *bp = netdev_priv(dev);
7848 int rc = 0;
7849
7850 rtnl_lock();
7851 if (netif_running(dev)) {
7852 netif_device_detach(dev);
7853 rc = bnxt_close(dev);
7854 }
7855 bnxt_hwrm_func_drv_unrgtr(bp);
7856 rtnl_unlock();
7857 return rc;
7858}
7859
7860static int bnxt_resume(struct device *device)
7861{
7862 struct pci_dev *pdev = to_pci_dev(device);
7863 struct net_device *dev = pci_get_drvdata(pdev);
7864 struct bnxt *bp = netdev_priv(dev);
7865 int rc = 0;
7866
7867 rtnl_lock();
7868 if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
7869 rc = -ENODEV;
7870 goto resume_exit;
7871 }
7872 rc = bnxt_hwrm_func_reset(bp);
7873 if (rc) {
7874 rc = -EBUSY;
7875 goto resume_exit;
7876 }
7877 bnxt_get_wol_settings(bp);
7878 if (netif_running(dev)) {
7879 rc = bnxt_open(dev);
7880 if (!rc)
7881 netif_device_attach(dev);
7882 }
7883
7884resume_exit:
7885 rtnl_unlock();
7886 return rc;
7887}
7888
7889static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
7890#define BNXT_PM_OPS (&bnxt_pm_ops)
7891
7892#else
7893
7894#define BNXT_PM_OPS NULL
7895
7896#endif /* CONFIG_PM_SLEEP */
7897
6316ea6d
SB
7898/**
7899 * bnxt_io_error_detected - called when PCI error is detected
7900 * @pdev: Pointer to PCI device
7901 * @state: The current pci connection state
7902 *
7903 * This function is called after a PCI bus error affecting
7904 * this device has been detected.
7905 */
7906static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
7907 pci_channel_state_t state)
7908{
7909 struct net_device *netdev = pci_get_drvdata(pdev);
a588e458 7910 struct bnxt *bp = netdev_priv(netdev);
6316ea6d
SB
7911
7912 netdev_info(netdev, "PCI I/O error detected\n");
7913
7914 rtnl_lock();
7915 netif_device_detach(netdev);
7916
a588e458
MC
7917 bnxt_ulp_stop(bp);
7918
6316ea6d
SB
7919 if (state == pci_channel_io_perm_failure) {
7920 rtnl_unlock();
7921 return PCI_ERS_RESULT_DISCONNECT;
7922 }
7923
7924 if (netif_running(netdev))
7925 bnxt_close(netdev);
7926
7927 pci_disable_device(pdev);
7928 rtnl_unlock();
7929
7930 /* Request a slot reset. */
7931 return PCI_ERS_RESULT_NEED_RESET;
7932}
7933
7934/**
7935 * bnxt_io_slot_reset - called after the pci bus has been reset.
7936 * @pdev: Pointer to PCI device
7937 *
7938 * Restart the card from scratch, as if from a cold-boot.
7939 * At this point, the card has experienced a hard reset,
7940 * followed by fixups by BIOS, and has its config space
7941 * set up identically to what it was at cold boot.
7942 */
7943static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
7944{
7945 struct net_device *netdev = pci_get_drvdata(pdev);
7946 struct bnxt *bp = netdev_priv(netdev);
7947 int err = 0;
7948 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
7949
7950 netdev_info(bp->dev, "PCI Slot Reset\n");
7951
7952 rtnl_lock();
7953
7954 if (pci_enable_device(pdev)) {
7955 dev_err(&pdev->dev,
7956 "Cannot re-enable PCI device after reset.\n");
7957 } else {
7958 pci_set_master(pdev);
7959
aa8ed021
MC
7960 err = bnxt_hwrm_func_reset(bp);
7961 if (!err && netif_running(netdev))
6316ea6d
SB
7962 err = bnxt_open(netdev);
7963
a588e458 7964 if (!err) {
6316ea6d 7965 result = PCI_ERS_RESULT_RECOVERED;
a588e458
MC
7966 bnxt_ulp_start(bp);
7967 }
6316ea6d
SB
7968 }
7969
7970 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
7971 dev_close(netdev);
7972
7973 rtnl_unlock();
7974
7975 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7976 if (err) {
7977 dev_err(&pdev->dev,
7978 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7979 err); /* non-fatal, continue */
7980 }
7981
7982 return PCI_ERS_RESULT_RECOVERED;
7983}
7984
7985/**
7986 * bnxt_io_resume - called when traffic can start flowing again.
7987 * @pdev: Pointer to PCI device
7988 *
7989 * This callback is called when the error recovery driver tells
7990 * us that its OK to resume normal operation.
7991 */
7992static void bnxt_io_resume(struct pci_dev *pdev)
7993{
7994 struct net_device *netdev = pci_get_drvdata(pdev);
7995
7996 rtnl_lock();
7997
7998 netif_device_attach(netdev);
7999
8000 rtnl_unlock();
8001}
8002
8003static const struct pci_error_handlers bnxt_err_handler = {
8004 .error_detected = bnxt_io_error_detected,
8005 .slot_reset = bnxt_io_slot_reset,
8006 .resume = bnxt_io_resume
8007};
8008
c0c050c5
MC
8009static struct pci_driver bnxt_pci_driver = {
8010 .name = DRV_MODULE_NAME,
8011 .id_table = bnxt_pci_tbl,
8012 .probe = bnxt_init_one,
8013 .remove = bnxt_remove_one,
d196ece7 8014 .shutdown = bnxt_shutdown,
f65a2044 8015 .driver.pm = BNXT_PM_OPS,
6316ea6d 8016 .err_handler = &bnxt_err_handler,
c0c050c5
MC
8017#if defined(CONFIG_BNXT_SRIOV)
8018 .sriov_configure = bnxt_sriov_configure,
8019#endif
8020};
8021
8022module_pci_driver(bnxt_pci_driver);